diff --git a/owl-bot-staging/v1/.coveragerc b/owl-bot-staging/v1/.coveragerc new file mode 100644 index 000000000..3487bb298 --- /dev/null +++ b/owl-bot-staging/v1/.coveragerc @@ -0,0 +1,17 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/compute/__init__.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ + # Ignore pkg_resources exceptions. + # This is added at the module level as a safeguard for if someone + # generates the code and tries to run it without pip installing. This + # makes it virtually impossible to test properly. + except pkg_resources.DistributionNotFound diff --git a/owl-bot-staging/v1/MANIFEST.in b/owl-bot-staging/v1/MANIFEST.in new file mode 100644 index 000000000..ac59f919b --- /dev/null +++ b/owl-bot-staging/v1/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/compute *.py +recursive-include google/cloud/compute_v1 *.py diff --git a/owl-bot-staging/v1/README.rst b/owl-bot-staging/v1/README.rst new file mode 100644 index 000000000..f6373e2cb --- /dev/null +++ b/owl-bot-staging/v1/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Compute API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Compute API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. 
The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + source <your-env>/bin/activate + <your-env>/bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + <your-env>\Scripts\activate + <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/v1/docs/compute_v1/accelerator_types.rst b/owl-bot-staging/v1/docs/compute_v1/accelerator_types.rst new file mode 100644 index 000000000..f865b6225 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/accelerator_types.rst @@ -0,0 +1,10 @@ +AcceleratorTypes +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.accelerator_types + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.accelerator_types.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/addresses.rst b/owl-bot-staging/v1/docs/compute_v1/addresses.rst new file mode 100644 index 000000000..cb88b008e --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/addresses.rst @@ -0,0 +1,10 @@ +Addresses +--------------------------- + +.. automodule:: google.cloud.compute_v1.services.addresses + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.addresses.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/autoscalers.rst b/owl-bot-staging/v1/docs/compute_v1/autoscalers.rst new file mode 100644 index 000000000..59e44e672 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/autoscalers.rst @@ -0,0 +1,10 @@ +Autoscalers +----------------------------- + +.. 
automodule:: google.cloud.compute_v1.services.autoscalers + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.autoscalers.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/backend_buckets.rst b/owl-bot-staging/v1/docs/compute_v1/backend_buckets.rst new file mode 100644 index 000000000..2a8d5210c --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/backend_buckets.rst @@ -0,0 +1,10 @@ +BackendBuckets +-------------------------------- + +.. automodule:: google.cloud.compute_v1.services.backend_buckets + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.backend_buckets.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/backend_services.rst b/owl-bot-staging/v1/docs/compute_v1/backend_services.rst new file mode 100644 index 000000000..80a321599 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/backend_services.rst @@ -0,0 +1,10 @@ +BackendServices +--------------------------------- + +.. automodule:: google.cloud.compute_v1.services.backend_services + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.backend_services.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/disk_types.rst b/owl-bot-staging/v1/docs/compute_v1/disk_types.rst new file mode 100644 index 000000000..be3cfb6c3 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/disk_types.rst @@ -0,0 +1,10 @@ +DiskTypes +--------------------------- + +.. automodule:: google.cloud.compute_v1.services.disk_types + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.disk_types.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/disks.rst b/owl-bot-staging/v1/docs/compute_v1/disks.rst new file mode 100644 index 000000000..898c491c2 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/disks.rst @@ -0,0 +1,10 @@ +Disks +----------------------- + +.. automodule:: google.cloud.compute_v1.services.disks + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.disks.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/external_vpn_gateways.rst b/owl-bot-staging/v1/docs/compute_v1/external_vpn_gateways.rst new file mode 100644 index 000000000..804891507 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/external_vpn_gateways.rst @@ -0,0 +1,10 @@ +ExternalVpnGateways +------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.external_vpn_gateways + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.external_vpn_gateways.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/firewall_policies.rst b/owl-bot-staging/v1/docs/compute_v1/firewall_policies.rst new file mode 100644 index 000000000..2d4fbd9ff --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/firewall_policies.rst @@ -0,0 +1,10 @@ +FirewallPolicies +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.firewall_policies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.firewall_policies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/firewalls.rst b/owl-bot-staging/v1/docs/compute_v1/firewalls.rst new file mode 100644 index 000000000..24448c98b --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/firewalls.rst @@ -0,0 +1,10 @@ +Firewalls +--------------------------- + +.. 
automodule:: google.cloud.compute_v1.services.firewalls + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.firewalls.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/forwarding_rules.rst b/owl-bot-staging/v1/docs/compute_v1/forwarding_rules.rst new file mode 100644 index 000000000..6a808d3fc --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/forwarding_rules.rst @@ -0,0 +1,10 @@ +ForwardingRules +--------------------------------- + +.. automodule:: google.cloud.compute_v1.services.forwarding_rules + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.forwarding_rules.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/global_addresses.rst b/owl-bot-staging/v1/docs/compute_v1/global_addresses.rst new file mode 100644 index 000000000..d106676a0 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/global_addresses.rst @@ -0,0 +1,10 @@ +GlobalAddresses +--------------------------------- + +.. automodule:: google.cloud.compute_v1.services.global_addresses + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.global_addresses.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/global_forwarding_rules.rst b/owl-bot-staging/v1/docs/compute_v1/global_forwarding_rules.rst new file mode 100644 index 000000000..710d24590 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/global_forwarding_rules.rst @@ -0,0 +1,10 @@ +GlobalForwardingRules +--------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.global_forwarding_rules + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.global_forwarding_rules.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/global_network_endpoint_groups.rst b/owl-bot-staging/v1/docs/compute_v1/global_network_endpoint_groups.rst new file mode 100644 index 000000000..a13a31a67 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/global_network_endpoint_groups.rst @@ -0,0 +1,10 @@ +GlobalNetworkEndpointGroups +--------------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.global_network_endpoint_groups + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.global_network_endpoint_groups.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/global_operations.rst b/owl-bot-staging/v1/docs/compute_v1/global_operations.rst new file mode 100644 index 000000000..94450a7ee --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/global_operations.rst @@ -0,0 +1,10 @@ +GlobalOperations +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.global_operations + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.global_operations.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/global_organization_operations.rst b/owl-bot-staging/v1/docs/compute_v1/global_organization_operations.rst new file mode 100644 index 000000000..d4e514357 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/global_organization_operations.rst @@ -0,0 +1,10 @@ +GlobalOrganizationOperations +---------------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.global_organization_operations + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.global_organization_operations.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/global_public_delegated_prefixes.rst b/owl-bot-staging/v1/docs/compute_v1/global_public_delegated_prefixes.rst new file mode 100644 index 000000000..c712f7473 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/global_public_delegated_prefixes.rst @@ -0,0 +1,10 @@ +GlobalPublicDelegatedPrefixes +----------------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.global_public_delegated_prefixes + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.global_public_delegated_prefixes.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/health_checks.rst b/owl-bot-staging/v1/docs/compute_v1/health_checks.rst new file mode 100644 index 000000000..4f6fbf590 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/health_checks.rst @@ -0,0 +1,10 @@ +HealthChecks +------------------------------ + +.. automodule:: google.cloud.compute_v1.services.health_checks + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.health_checks.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/image_family_views.rst b/owl-bot-staging/v1/docs/compute_v1/image_family_views.rst new file mode 100644 index 000000000..40c73b6a1 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/image_family_views.rst @@ -0,0 +1,6 @@ +ImageFamilyViews +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.image_family_views + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/images.rst b/owl-bot-staging/v1/docs/compute_v1/images.rst new file mode 100644 index 000000000..a128da7e5 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/images.rst @@ -0,0 +1,10 @@ +Images +------------------------ + +.. 
automodule:: google.cloud.compute_v1.services.images + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.images.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/instance_group_managers.rst b/owl-bot-staging/v1/docs/compute_v1/instance_group_managers.rst new file mode 100644 index 000000000..eec48ff06 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/instance_group_managers.rst @@ -0,0 +1,10 @@ +InstanceGroupManagers +--------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.instance_group_managers + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.instance_group_managers.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/instance_groups.rst b/owl-bot-staging/v1/docs/compute_v1/instance_groups.rst new file mode 100644 index 000000000..30ccb2bcb --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/instance_groups.rst @@ -0,0 +1,10 @@ +InstanceGroups +-------------------------------- + +.. automodule:: google.cloud.compute_v1.services.instance_groups + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.instance_groups.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/instance_templates.rst b/owl-bot-staging/v1/docs/compute_v1/instance_templates.rst new file mode 100644 index 000000000..1e04745dc --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/instance_templates.rst @@ -0,0 +1,10 @@ +InstanceTemplates +----------------------------------- + +.. automodule:: google.cloud.compute_v1.services.instance_templates + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.instance_templates.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/instances.rst b/owl-bot-staging/v1/docs/compute_v1/instances.rst new file mode 100644 index 000000000..ff79fdecc --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/instances.rst @@ -0,0 +1,10 @@ +Instances +--------------------------- + +.. automodule:: google.cloud.compute_v1.services.instances + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.instances.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/interconnect_attachments.rst b/owl-bot-staging/v1/docs/compute_v1/interconnect_attachments.rst new file mode 100644 index 000000000..6c6d6e907 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/interconnect_attachments.rst @@ -0,0 +1,10 @@ +InterconnectAttachments +----------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.interconnect_attachments + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.interconnect_attachments.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/interconnect_locations.rst b/owl-bot-staging/v1/docs/compute_v1/interconnect_locations.rst new file mode 100644 index 000000000..ed94bf8b0 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/interconnect_locations.rst @@ -0,0 +1,10 @@ +InterconnectLocations +--------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.interconnect_locations + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.interconnect_locations.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/interconnects.rst b/owl-bot-staging/v1/docs/compute_v1/interconnects.rst new file mode 100644 index 000000000..810de28ad --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/interconnects.rst @@ -0,0 +1,10 @@ +Interconnects +------------------------------- + +.. automodule:: google.cloud.compute_v1.services.interconnects + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.interconnects.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/license_codes.rst b/owl-bot-staging/v1/docs/compute_v1/license_codes.rst new file mode 100644 index 000000000..88fe1b72e --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/license_codes.rst @@ -0,0 +1,6 @@ +LicenseCodes +------------------------------ + +.. automodule:: google.cloud.compute_v1.services.license_codes + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/licenses.rst b/owl-bot-staging/v1/docs/compute_v1/licenses.rst new file mode 100644 index 000000000..1782e396a --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/licenses.rst @@ -0,0 +1,10 @@ +Licenses +-------------------------- + +.. automodule:: google.cloud.compute_v1.services.licenses + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.licenses.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/machine_types.rst b/owl-bot-staging/v1/docs/compute_v1/machine_types.rst new file mode 100644 index 000000000..9c536e008 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/machine_types.rst @@ -0,0 +1,10 @@ +MachineTypes +------------------------------ + +.. automodule:: google.cloud.compute_v1.services.machine_types + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.machine_types.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/network_endpoint_groups.rst b/owl-bot-staging/v1/docs/compute_v1/network_endpoint_groups.rst new file mode 100644 index 000000000..0a929be61 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/network_endpoint_groups.rst @@ -0,0 +1,10 @@ +NetworkEndpointGroups +--------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.network_endpoint_groups + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.network_endpoint_groups.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/networks.rst b/owl-bot-staging/v1/docs/compute_v1/networks.rst new file mode 100644 index 000000000..2a74f4714 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/networks.rst @@ -0,0 +1,10 @@ +Networks +-------------------------- + +.. automodule:: google.cloud.compute_v1.services.networks + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.networks.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/node_groups.rst b/owl-bot-staging/v1/docs/compute_v1/node_groups.rst new file mode 100644 index 000000000..ee5f0b254 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/node_groups.rst @@ -0,0 +1,10 @@ +NodeGroups +---------------------------- + +.. automodule:: google.cloud.compute_v1.services.node_groups + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.node_groups.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/node_templates.rst b/owl-bot-staging/v1/docs/compute_v1/node_templates.rst new file mode 100644 index 000000000..1cd30fb6c --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/node_templates.rst @@ -0,0 +1,10 @@ +NodeTemplates +------------------------------- + +.. 
automodule:: google.cloud.compute_v1.services.node_templates + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.node_templates.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/node_types.rst b/owl-bot-staging/v1/docs/compute_v1/node_types.rst new file mode 100644 index 000000000..4e765f36f --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/node_types.rst @@ -0,0 +1,10 @@ +NodeTypes +--------------------------- + +.. automodule:: google.cloud.compute_v1.services.node_types + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.node_types.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/packet_mirrorings.rst b/owl-bot-staging/v1/docs/compute_v1/packet_mirrorings.rst new file mode 100644 index 000000000..1fce6caed --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/packet_mirrorings.rst @@ -0,0 +1,10 @@ +PacketMirrorings +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.packet_mirrorings + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.packet_mirrorings.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/projects.rst b/owl-bot-staging/v1/docs/compute_v1/projects.rst new file mode 100644 index 000000000..5e076fa50 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/projects.rst @@ -0,0 +1,10 @@ +Projects +-------------------------- + +.. automodule:: google.cloud.compute_v1.services.projects + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.projects.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/public_advertised_prefixes.rst b/owl-bot-staging/v1/docs/compute_v1/public_advertised_prefixes.rst new file mode 100644 index 000000000..54c433688 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/public_advertised_prefixes.rst @@ -0,0 +1,10 @@ +PublicAdvertisedPrefixes +------------------------------------------ + +.. automodule:: google.cloud.compute_v1.services.public_advertised_prefixes + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.public_advertised_prefixes.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/public_delegated_prefixes.rst b/owl-bot-staging/v1/docs/compute_v1/public_delegated_prefixes.rst new file mode 100644 index 000000000..798744764 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/public_delegated_prefixes.rst @@ -0,0 +1,10 @@ +PublicDelegatedPrefixes +----------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.public_delegated_prefixes + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.public_delegated_prefixes.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_autoscalers.rst b/owl-bot-staging/v1/docs/compute_v1/region_autoscalers.rst new file mode 100644 index 000000000..cb7ace393 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_autoscalers.rst @@ -0,0 +1,10 @@ +RegionAutoscalers +----------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_autoscalers + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.region_autoscalers.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_backend_services.rst b/owl-bot-staging/v1/docs/compute_v1/region_backend_services.rst new file mode 100644 index 000000000..1fcbe4028 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_backend_services.rst @@ -0,0 +1,10 @@ +RegionBackendServices +--------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_backend_services + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_backend_services.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_commitments.rst b/owl-bot-staging/v1/docs/compute_v1/region_commitments.rst new file mode 100644 index 000000000..f88e46dc1 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_commitments.rst @@ -0,0 +1,10 @@ +RegionCommitments +----------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_commitments + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_commitments.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_disk_types.rst b/owl-bot-staging/v1/docs/compute_v1/region_disk_types.rst new file mode 100644 index 000000000..16a33bc3e --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_disk_types.rst @@ -0,0 +1,10 @@ +RegionDiskTypes +--------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_disk_types + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.region_disk_types.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_disks.rst b/owl-bot-staging/v1/docs/compute_v1/region_disks.rst new file mode 100644 index 000000000..db87ad4b7 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_disks.rst @@ -0,0 +1,10 @@ +RegionDisks +----------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_disks + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_disks.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_health_check_services.rst b/owl-bot-staging/v1/docs/compute_v1/region_health_check_services.rst new file mode 100644 index 000000000..2d28e52c1 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_health_check_services.rst @@ -0,0 +1,10 @@ +RegionHealthCheckServices +------------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_health_check_services + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_health_check_services.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_health_checks.rst b/owl-bot-staging/v1/docs/compute_v1/region_health_checks.rst new file mode 100644 index 000000000..296eab2c4 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_health_checks.rst @@ -0,0 +1,10 @@ +RegionHealthChecks +------------------------------------ + +.. automodule:: google.cloud.compute_v1.services.region_health_checks + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.region_health_checks.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_instance_group_managers.rst b/owl-bot-staging/v1/docs/compute_v1/region_instance_group_managers.rst new file mode 100644 index 000000000..a6caf22fb --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_instance_group_managers.rst @@ -0,0 +1,10 @@ +RegionInstanceGroupManagers +--------------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_instance_group_managers + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_instance_group_managers.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_instance_groups.rst b/owl-bot-staging/v1/docs/compute_v1/region_instance_groups.rst new file mode 100644 index 000000000..83ddb95ab --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_instance_groups.rst @@ -0,0 +1,10 @@ +RegionInstanceGroups +-------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_instance_groups + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_instance_groups.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_instances.rst b/owl-bot-staging/v1/docs/compute_v1/region_instances.rst new file mode 100644 index 000000000..780a33cd9 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_instances.rst @@ -0,0 +1,6 @@ +RegionInstances +--------------------------------- + +.. 
automodule:: google.cloud.compute_v1.services.region_instances + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_network_endpoint_groups.rst b/owl-bot-staging/v1/docs/compute_v1/region_network_endpoint_groups.rst new file mode 100644 index 000000000..5c386c6fd --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_network_endpoint_groups.rst @@ -0,0 +1,10 @@ +RegionNetworkEndpointGroups +--------------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_network_endpoint_groups + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_network_endpoint_groups.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_notification_endpoints.rst b/owl-bot-staging/v1/docs/compute_v1/region_notification_endpoints.rst new file mode 100644 index 000000000..b69fe5d72 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_notification_endpoints.rst @@ -0,0 +1,10 @@ +RegionNotificationEndpoints +--------------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_notification_endpoints + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_notification_endpoints.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_operations.rst b/owl-bot-staging/v1/docs/compute_v1/region_operations.rst new file mode 100644 index 000000000..df407212c --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_operations.rst @@ -0,0 +1,10 @@ +RegionOperations +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_operations + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.region_operations.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_ssl_certificates.rst b/owl-bot-staging/v1/docs/compute_v1/region_ssl_certificates.rst new file mode 100644 index 000000000..e12d89e5d --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_ssl_certificates.rst @@ -0,0 +1,10 @@ +RegionSslCertificates +--------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_ssl_certificates + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_ssl_certificates.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_target_http_proxies.rst b/owl-bot-staging/v1/docs/compute_v1/region_target_http_proxies.rst new file mode 100644 index 000000000..187504997 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_target_http_proxies.rst @@ -0,0 +1,10 @@ +RegionTargetHttpProxies +----------------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_target_http_proxies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_target_http_proxies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_target_https_proxies.rst b/owl-bot-staging/v1/docs/compute_v1/region_target_https_proxies.rst new file mode 100644 index 000000000..32ad2918d --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_target_https_proxies.rst @@ -0,0 +1,10 @@ +RegionTargetHttpsProxies +------------------------------------------ + +.. automodule:: google.cloud.compute_v1.services.region_target_https_proxies + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.region_target_https_proxies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/region_url_maps.rst b/owl-bot-staging/v1/docs/compute_v1/region_url_maps.rst new file mode 100644 index 000000000..204cdf69b --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/region_url_maps.rst @@ -0,0 +1,10 @@ +RegionUrlMaps +------------------------------- + +.. automodule:: google.cloud.compute_v1.services.region_url_maps + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.region_url_maps.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/regions.rst b/owl-bot-staging/v1/docs/compute_v1/regions.rst new file mode 100644 index 000000000..4beda5440 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/regions.rst @@ -0,0 +1,10 @@ +Regions +------------------------- + +.. automodule:: google.cloud.compute_v1.services.regions + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.regions.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/reservations.rst b/owl-bot-staging/v1/docs/compute_v1/reservations.rst new file mode 100644 index 000000000..adfa21b2c --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/reservations.rst @@ -0,0 +1,10 @@ +Reservations +------------------------------ + +.. automodule:: google.cloud.compute_v1.services.reservations + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.reservations.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/resource_policies.rst b/owl-bot-staging/v1/docs/compute_v1/resource_policies.rst new file mode 100644 index 000000000..39e2bab0e --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/resource_policies.rst @@ -0,0 +1,10 @@ +ResourcePolicies +---------------------------------- + +.. 
automodule:: google.cloud.compute_v1.services.resource_policies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.resource_policies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/routers.rst b/owl-bot-staging/v1/docs/compute_v1/routers.rst new file mode 100644 index 000000000..0a82b2235 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/routers.rst @@ -0,0 +1,10 @@ +Routers +------------------------- + +.. automodule:: google.cloud.compute_v1.services.routers + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.routers.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/routes.rst b/owl-bot-staging/v1/docs/compute_v1/routes.rst new file mode 100644 index 000000000..d03cd7e37 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/routes.rst @@ -0,0 +1,10 @@ +Routes +------------------------ + +.. automodule:: google.cloud.compute_v1.services.routes + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.routes.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/security_policies.rst b/owl-bot-staging/v1/docs/compute_v1/security_policies.rst new file mode 100644 index 000000000..a2893695b --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/security_policies.rst @@ -0,0 +1,10 @@ +SecurityPolicies +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.security_policies + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.security_policies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/service_attachments.rst b/owl-bot-staging/v1/docs/compute_v1/service_attachments.rst new file mode 100644 index 000000000..07c4e4699 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/service_attachments.rst @@ -0,0 +1,10 @@ +ServiceAttachments +------------------------------------ + +.. automodule:: google.cloud.compute_v1.services.service_attachments + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.service_attachments.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/services.rst b/owl-bot-staging/v1/docs/compute_v1/services.rst new file mode 100644 index 000000000..36161c200 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/services.rst @@ -0,0 +1,85 @@ +Services for Google Cloud Compute v1 API +======================================== +.. toctree:: + :maxdepth: 2 + + accelerator_types + addresses + autoscalers + backend_buckets + backend_services + disks + disk_types + external_vpn_gateways + firewall_policies + firewalls + forwarding_rules + global_addresses + global_forwarding_rules + global_network_endpoint_groups + global_operations + global_organization_operations + global_public_delegated_prefixes + health_checks + image_family_views + images + instance_group_managers + instance_groups + instances + instance_templates + interconnect_attachments + interconnect_locations + interconnects + license_codes + licenses + machine_types + network_endpoint_groups + networks + node_groups + node_templates + node_types + packet_mirrorings + projects + public_advertised_prefixes + public_delegated_prefixes + region_autoscalers + region_backend_services + region_commitments + region_disks + region_disk_types + region_health_checks + region_health_check_services + region_instance_group_managers + region_instance_groups + region_instances 
+ region_network_endpoint_groups + region_notification_endpoints + region_operations + regions + region_ssl_certificates + region_target_http_proxies + region_target_https_proxies + region_url_maps + reservations + resource_policies + routers + routes + security_policies + service_attachments + snapshots + ssl_certificates + ssl_policies + subnetworks + target_grpc_proxies + target_http_proxies + target_https_proxies + target_instances + target_pools + target_ssl_proxies + target_tcp_proxies + target_vpn_gateways + url_maps + vpn_gateways + vpn_tunnels + zone_operations + zones diff --git a/owl-bot-staging/v1/docs/compute_v1/snapshots.rst b/owl-bot-staging/v1/docs/compute_v1/snapshots.rst new file mode 100644 index 000000000..a29a1aa93 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/snapshots.rst @@ -0,0 +1,10 @@ +Snapshots +--------------------------- + +.. automodule:: google.cloud.compute_v1.services.snapshots + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.snapshots.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/ssl_certificates.rst b/owl-bot-staging/v1/docs/compute_v1/ssl_certificates.rst new file mode 100644 index 000000000..e90c34ddd --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/ssl_certificates.rst @@ -0,0 +1,10 @@ +SslCertificates +--------------------------------- + +.. automodule:: google.cloud.compute_v1.services.ssl_certificates + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.ssl_certificates.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/ssl_policies.rst b/owl-bot-staging/v1/docs/compute_v1/ssl_policies.rst new file mode 100644 index 000000000..d07c8b443 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/ssl_policies.rst @@ -0,0 +1,10 @@ +SslPolicies +----------------------------- + +.. 
automodule:: google.cloud.compute_v1.services.ssl_policies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.ssl_policies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/subnetworks.rst b/owl-bot-staging/v1/docs/compute_v1/subnetworks.rst new file mode 100644 index 000000000..cd20e0933 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/subnetworks.rst @@ -0,0 +1,10 @@ +Subnetworks +----------------------------- + +.. automodule:: google.cloud.compute_v1.services.subnetworks + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.subnetworks.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_grpc_proxies.rst b/owl-bot-staging/v1/docs/compute_v1/target_grpc_proxies.rst new file mode 100644 index 000000000..4ddd96105 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_grpc_proxies.rst @@ -0,0 +1,10 @@ +TargetGrpcProxies +----------------------------------- + +.. automodule:: google.cloud.compute_v1.services.target_grpc_proxies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.target_grpc_proxies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_http_proxies.rst b/owl-bot-staging/v1/docs/compute_v1/target_http_proxies.rst new file mode 100644 index 000000000..8fe97fe26 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_http_proxies.rst @@ -0,0 +1,10 @@ +TargetHttpProxies +----------------------------------- + +.. automodule:: google.cloud.compute_v1.services.target_http_proxies + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.target_http_proxies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_https_proxies.rst b/owl-bot-staging/v1/docs/compute_v1/target_https_proxies.rst new file mode 100644 index 000000000..d3cd4242d --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_https_proxies.rst @@ -0,0 +1,10 @@ +TargetHttpsProxies +------------------------------------ + +.. automodule:: google.cloud.compute_v1.services.target_https_proxies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.target_https_proxies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_instances.rst b/owl-bot-staging/v1/docs/compute_v1/target_instances.rst new file mode 100644 index 000000000..9ab124ef9 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_instances.rst @@ -0,0 +1,10 @@ +TargetInstances +--------------------------------- + +.. automodule:: google.cloud.compute_v1.services.target_instances + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.target_instances.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_pools.rst b/owl-bot-staging/v1/docs/compute_v1/target_pools.rst new file mode 100644 index 000000000..7be339431 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_pools.rst @@ -0,0 +1,10 @@ +TargetPools +----------------------------- + +.. automodule:: google.cloud.compute_v1.services.target_pools + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.target_pools.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_ssl_proxies.rst b/owl-bot-staging/v1/docs/compute_v1/target_ssl_proxies.rst new file mode 100644 index 000000000..5c39bcf67 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_ssl_proxies.rst @@ -0,0 +1,10 @@ +TargetSslProxies +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.target_ssl_proxies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.target_ssl_proxies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_tcp_proxies.rst b/owl-bot-staging/v1/docs/compute_v1/target_tcp_proxies.rst new file mode 100644 index 000000000..940978c3f --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_tcp_proxies.rst @@ -0,0 +1,10 @@ +TargetTcpProxies +---------------------------------- + +.. automodule:: google.cloud.compute_v1.services.target_tcp_proxies + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.target_tcp_proxies.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/target_vpn_gateways.rst b/owl-bot-staging/v1/docs/compute_v1/target_vpn_gateways.rst new file mode 100644 index 000000000..0fb0f169c --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/target_vpn_gateways.rst @@ -0,0 +1,10 @@ +TargetVpnGateways +----------------------------------- + +.. automodule:: google.cloud.compute_v1.services.target_vpn_gateways + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.target_vpn_gateways.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/types.rst b/owl-bot-staging/v1/docs/compute_v1/types.rst new file mode 100644 index 000000000..37d307ace --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/types.rst @@ -0,0 +1,7 @@ +Types for Google Cloud Compute v1 API +===================================== + +.. automodule:: google.cloud.compute_v1.types + :members: + :undoc-members: + :show-inheritance: diff --git a/owl-bot-staging/v1/docs/compute_v1/url_maps.rst b/owl-bot-staging/v1/docs/compute_v1/url_maps.rst new file mode 100644 index 000000000..c0494fe72 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/url_maps.rst @@ -0,0 +1,10 @@ +UrlMaps +------------------------- + +.. automodule:: google.cloud.compute_v1.services.url_maps + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.url_maps.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/vpn_gateways.rst b/owl-bot-staging/v1/docs/compute_v1/vpn_gateways.rst new file mode 100644 index 000000000..4313fd9b8 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/vpn_gateways.rst @@ -0,0 +1,10 @@ +VpnGateways +----------------------------- + +.. automodule:: google.cloud.compute_v1.services.vpn_gateways + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.vpn_gateways.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/vpn_tunnels.rst b/owl-bot-staging/v1/docs/compute_v1/vpn_tunnels.rst new file mode 100644 index 000000000..ba0faf8ba --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/vpn_tunnels.rst @@ -0,0 +1,10 @@ +VpnTunnels +---------------------------- + +.. automodule:: google.cloud.compute_v1.services.vpn_tunnels + :members: + :inherited-members: + +.. 
automodule:: google.cloud.compute_v1.services.vpn_tunnels.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/zone_operations.rst b/owl-bot-staging/v1/docs/compute_v1/zone_operations.rst new file mode 100644 index 000000000..3ad84fbc8 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/zone_operations.rst @@ -0,0 +1,10 @@ +ZoneOperations +-------------------------------- + +.. automodule:: google.cloud.compute_v1.services.zone_operations + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.zone_operations.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/compute_v1/zones.rst b/owl-bot-staging/v1/docs/compute_v1/zones.rst new file mode 100644 index 000000000..f34131bf8 --- /dev/null +++ b/owl-bot-staging/v1/docs/compute_v1/zones.rst @@ -0,0 +1,10 @@ +Zones +----------------------- + +.. automodule:: google.cloud.compute_v1.services.zones + :members: + :inherited-members: + +.. automodule:: google.cloud.compute_v1.services.zones.pagers + :members: + :inherited-members: diff --git a/owl-bot-staging/v1/docs/conf.py b/owl-bot-staging/v1/docs/conf.py new file mode 100644 index 000000000..f8cfdac5e --- /dev/null +++ b/owl-bot-staging/v1/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# +# google-cloud-compute documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "1.6.3" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGELOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. 
+project = u"google-cloud-compute" +copyright = u"2020, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. 
+todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. 
+# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. 
+htmlhelp_basename = "google-cloud-compute-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warning, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + master_doc, + "google-cloud-compute.tex", + u"google-cloud-compute Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + ( + master_doc, + "google-cloud-compute", + u"Google Cloud Compute Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + master_doc, + "google-cloud-compute", + u"google-cloud-compute Documentation", + author, + "google-cloud-compute", + "GAPIC library for Google Cloud Compute API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. 
+intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/v1/docs/index.rst b/owl-bot-staging/v1/docs/index.rst new file mode 100644 index 000000000..a84412687 --- /dev/null +++ b/owl-bot-staging/v1/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + compute_v1/services + compute_v1/types diff --git a/owl-bot-staging/v1/google/cloud/compute/__init__.py b/owl-bot-staging/v1/google/cloud/compute/__init__.py new file mode 100644 index 000000000..bba272f9a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute/__init__.py @@ -0,0 +1,2499 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.cloud.compute_v1.services.accelerator_types.client import AcceleratorTypesClient +from google.cloud.compute_v1.services.addresses.client import AddressesClient +from google.cloud.compute_v1.services.autoscalers.client import AutoscalersClient +from google.cloud.compute_v1.services.backend_buckets.client import BackendBucketsClient +from google.cloud.compute_v1.services.backend_services.client import BackendServicesClient +from google.cloud.compute_v1.services.disks.client import DisksClient +from google.cloud.compute_v1.services.disk_types.client import DiskTypesClient +from google.cloud.compute_v1.services.external_vpn_gateways.client import ExternalVpnGatewaysClient +from google.cloud.compute_v1.services.firewall_policies.client import FirewallPoliciesClient +from google.cloud.compute_v1.services.firewalls.client import FirewallsClient +from google.cloud.compute_v1.services.forwarding_rules.client import ForwardingRulesClient +from google.cloud.compute_v1.services.global_addresses.client import GlobalAddressesClient +from google.cloud.compute_v1.services.global_forwarding_rules.client import GlobalForwardingRulesClient +from google.cloud.compute_v1.services.global_network_endpoint_groups.client import GlobalNetworkEndpointGroupsClient +from google.cloud.compute_v1.services.global_operations.client import GlobalOperationsClient +from google.cloud.compute_v1.services.global_organization_operations.client import GlobalOrganizationOperationsClient +from google.cloud.compute_v1.services.global_public_delegated_prefixes.client import 
GlobalPublicDelegatedPrefixesClient +from google.cloud.compute_v1.services.health_checks.client import HealthChecksClient +from google.cloud.compute_v1.services.image_family_views.client import ImageFamilyViewsClient +from google.cloud.compute_v1.services.images.client import ImagesClient +from google.cloud.compute_v1.services.instance_group_managers.client import InstanceGroupManagersClient +from google.cloud.compute_v1.services.instance_groups.client import InstanceGroupsClient +from google.cloud.compute_v1.services.instances.client import InstancesClient +from google.cloud.compute_v1.services.instance_templates.client import InstanceTemplatesClient +from google.cloud.compute_v1.services.interconnect_attachments.client import InterconnectAttachmentsClient +from google.cloud.compute_v1.services.interconnect_locations.client import InterconnectLocationsClient +from google.cloud.compute_v1.services.interconnects.client import InterconnectsClient +from google.cloud.compute_v1.services.license_codes.client import LicenseCodesClient +from google.cloud.compute_v1.services.licenses.client import LicensesClient +from google.cloud.compute_v1.services.machine_types.client import MachineTypesClient +from google.cloud.compute_v1.services.network_endpoint_groups.client import NetworkEndpointGroupsClient +from google.cloud.compute_v1.services.networks.client import NetworksClient +from google.cloud.compute_v1.services.node_groups.client import NodeGroupsClient +from google.cloud.compute_v1.services.node_templates.client import NodeTemplatesClient +from google.cloud.compute_v1.services.node_types.client import NodeTypesClient +from google.cloud.compute_v1.services.packet_mirrorings.client import PacketMirroringsClient +from google.cloud.compute_v1.services.projects.client import ProjectsClient +from google.cloud.compute_v1.services.public_advertised_prefixes.client import PublicAdvertisedPrefixesClient +from google.cloud.compute_v1.services.public_delegated_prefixes.client 
import PublicDelegatedPrefixesClient +from google.cloud.compute_v1.services.region_autoscalers.client import RegionAutoscalersClient +from google.cloud.compute_v1.services.region_backend_services.client import RegionBackendServicesClient +from google.cloud.compute_v1.services.region_commitments.client import RegionCommitmentsClient +from google.cloud.compute_v1.services.region_disks.client import RegionDisksClient +from google.cloud.compute_v1.services.region_disk_types.client import RegionDiskTypesClient +from google.cloud.compute_v1.services.region_health_checks.client import RegionHealthChecksClient +from google.cloud.compute_v1.services.region_health_check_services.client import RegionHealthCheckServicesClient +from google.cloud.compute_v1.services.region_instance_group_managers.client import RegionInstanceGroupManagersClient +from google.cloud.compute_v1.services.region_instance_groups.client import RegionInstanceGroupsClient +from google.cloud.compute_v1.services.region_instances.client import RegionInstancesClient +from google.cloud.compute_v1.services.region_network_endpoint_groups.client import RegionNetworkEndpointGroupsClient +from google.cloud.compute_v1.services.region_notification_endpoints.client import RegionNotificationEndpointsClient +from google.cloud.compute_v1.services.region_operations.client import RegionOperationsClient +from google.cloud.compute_v1.services.regions.client import RegionsClient +from google.cloud.compute_v1.services.region_ssl_certificates.client import RegionSslCertificatesClient +from google.cloud.compute_v1.services.region_target_http_proxies.client import RegionTargetHttpProxiesClient +from google.cloud.compute_v1.services.region_target_https_proxies.client import RegionTargetHttpsProxiesClient +from google.cloud.compute_v1.services.region_url_maps.client import RegionUrlMapsClient +from google.cloud.compute_v1.services.reservations.client import ReservationsClient +from 
google.cloud.compute_v1.services.resource_policies.client import ResourcePoliciesClient +from google.cloud.compute_v1.services.routers.client import RoutersClient +from google.cloud.compute_v1.services.routes.client import RoutesClient +from google.cloud.compute_v1.services.security_policies.client import SecurityPoliciesClient +from google.cloud.compute_v1.services.service_attachments.client import ServiceAttachmentsClient +from google.cloud.compute_v1.services.snapshots.client import SnapshotsClient +from google.cloud.compute_v1.services.ssl_certificates.client import SslCertificatesClient +from google.cloud.compute_v1.services.ssl_policies.client import SslPoliciesClient +from google.cloud.compute_v1.services.subnetworks.client import SubnetworksClient +from google.cloud.compute_v1.services.target_grpc_proxies.client import TargetGrpcProxiesClient +from google.cloud.compute_v1.services.target_http_proxies.client import TargetHttpProxiesClient +from google.cloud.compute_v1.services.target_https_proxies.client import TargetHttpsProxiesClient +from google.cloud.compute_v1.services.target_instances.client import TargetInstancesClient +from google.cloud.compute_v1.services.target_pools.client import TargetPoolsClient +from google.cloud.compute_v1.services.target_ssl_proxies.client import TargetSslProxiesClient +from google.cloud.compute_v1.services.target_tcp_proxies.client import TargetTcpProxiesClient +from google.cloud.compute_v1.services.target_vpn_gateways.client import TargetVpnGatewaysClient +from google.cloud.compute_v1.services.url_maps.client import UrlMapsClient +from google.cloud.compute_v1.services.vpn_gateways.client import VpnGatewaysClient +from google.cloud.compute_v1.services.vpn_tunnels.client import VpnTunnelsClient +from google.cloud.compute_v1.services.zone_operations.client import ZoneOperationsClient +from google.cloud.compute_v1.services.zones.client import ZonesClient + +from google.cloud.compute_v1.types.compute import 
AbandonInstancesInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import AbandonInstancesRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import AcceleratorConfig +from google.cloud.compute_v1.types.compute import Accelerators +from google.cloud.compute_v1.types.compute import AcceleratorType +from google.cloud.compute_v1.types.compute import AcceleratorTypeAggregatedList +from google.cloud.compute_v1.types.compute import AcceleratorTypeList +from google.cloud.compute_v1.types.compute import AcceleratorTypesScopedList +from google.cloud.compute_v1.types.compute import AccessConfig +from google.cloud.compute_v1.types.compute import AddAccessConfigInstanceRequest +from google.cloud.compute_v1.types.compute import AddAssociationFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import AddHealthCheckTargetPoolRequest +from google.cloud.compute_v1.types.compute import AddInstancesInstanceGroupRequest +from google.cloud.compute_v1.types.compute import AddInstanceTargetPoolRequest +from google.cloud.compute_v1.types.compute import AddNodesNodeGroupRequest +from google.cloud.compute_v1.types.compute import AddPeeringNetworkRequest +from google.cloud.compute_v1.types.compute import AddResourcePoliciesDiskRequest +from google.cloud.compute_v1.types.compute import AddResourcePoliciesInstanceRequest +from google.cloud.compute_v1.types.compute import AddResourcePoliciesRegionDiskRequest +from google.cloud.compute_v1.types.compute import Address +from google.cloud.compute_v1.types.compute import AddressAggregatedList +from google.cloud.compute_v1.types.compute import AddressesScopedList +from google.cloud.compute_v1.types.compute import AddressList +from google.cloud.compute_v1.types.compute import AddRuleFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import AddRuleSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import AddSignedUrlKeyBackendBucketRequest +from 
google.cloud.compute_v1.types.compute import AddSignedUrlKeyBackendServiceRequest +from google.cloud.compute_v1.types.compute import AdvancedMachineFeatures +from google.cloud.compute_v1.types.compute import AggregatedListAcceleratorTypesRequest +from google.cloud.compute_v1.types.compute import AggregatedListAddressesRequest +from google.cloud.compute_v1.types.compute import AggregatedListAutoscalersRequest +from google.cloud.compute_v1.types.compute import AggregatedListBackendServicesRequest +from google.cloud.compute_v1.types.compute import AggregatedListDisksRequest +from google.cloud.compute_v1.types.compute import AggregatedListDiskTypesRequest +from google.cloud.compute_v1.types.compute import AggregatedListForwardingRulesRequest +from google.cloud.compute_v1.types.compute import AggregatedListGlobalOperationsRequest +from google.cloud.compute_v1.types.compute import AggregatedListHealthChecksRequest +from google.cloud.compute_v1.types.compute import AggregatedListInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import AggregatedListInstanceGroupsRequest +from google.cloud.compute_v1.types.compute import AggregatedListInstancesRequest +from google.cloud.compute_v1.types.compute import AggregatedListInterconnectAttachmentsRequest +from google.cloud.compute_v1.types.compute import AggregatedListMachineTypesRequest +from google.cloud.compute_v1.types.compute import AggregatedListNetworkEndpointGroupsRequest +from google.cloud.compute_v1.types.compute import AggregatedListNodeGroupsRequest +from google.cloud.compute_v1.types.compute import AggregatedListNodeTemplatesRequest +from google.cloud.compute_v1.types.compute import AggregatedListNodeTypesRequest +from google.cloud.compute_v1.types.compute import AggregatedListPacketMirroringsRequest +from google.cloud.compute_v1.types.compute import AggregatedListPublicDelegatedPrefixesRequest +from google.cloud.compute_v1.types.compute import AggregatedListRegionCommitmentsRequest +from 
google.cloud.compute_v1.types.compute import AggregatedListReservationsRequest +from google.cloud.compute_v1.types.compute import AggregatedListResourcePoliciesRequest +from google.cloud.compute_v1.types.compute import AggregatedListRoutersRequest +from google.cloud.compute_v1.types.compute import AggregatedListServiceAttachmentsRequest +from google.cloud.compute_v1.types.compute import AggregatedListSslCertificatesRequest +from google.cloud.compute_v1.types.compute import AggregatedListSubnetworksRequest +from google.cloud.compute_v1.types.compute import AggregatedListTargetHttpProxiesRequest +from google.cloud.compute_v1.types.compute import AggregatedListTargetHttpsProxiesRequest +from google.cloud.compute_v1.types.compute import AggregatedListTargetInstancesRequest +from google.cloud.compute_v1.types.compute import AggregatedListTargetPoolsRequest +from google.cloud.compute_v1.types.compute import AggregatedListTargetVpnGatewaysRequest +from google.cloud.compute_v1.types.compute import AggregatedListUrlMapsRequest +from google.cloud.compute_v1.types.compute import AggregatedListVpnGatewaysRequest +from google.cloud.compute_v1.types.compute import AggregatedListVpnTunnelsRequest +from google.cloud.compute_v1.types.compute import AliasIpRange +from google.cloud.compute_v1.types.compute import AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk +from google.cloud.compute_v1.types.compute import AllocationSpecificSKUAllocationReservedInstanceProperties +from google.cloud.compute_v1.types.compute import AllocationSpecificSKUReservation +from google.cloud.compute_v1.types.compute import Allowed +from google.cloud.compute_v1.types.compute import ApplyUpdatesToInstancesInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import AttachDiskInstanceRequest +from google.cloud.compute_v1.types.compute import AttachedDisk +from 
google.cloud.compute_v1.types.compute import AttachedDiskInitializeParams +from google.cloud.compute_v1.types.compute import AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import AttachNetworkEndpointsNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import AuditConfig +from google.cloud.compute_v1.types.compute import AuditLogConfig +from google.cloud.compute_v1.types.compute import AuthorizationLoggingOptions +from google.cloud.compute_v1.types.compute import Autoscaler +from google.cloud.compute_v1.types.compute import AutoscalerAggregatedList +from google.cloud.compute_v1.types.compute import AutoscalerList +from google.cloud.compute_v1.types.compute import AutoscalersScopedList +from google.cloud.compute_v1.types.compute import AutoscalerStatusDetails +from google.cloud.compute_v1.types.compute import AutoscalingPolicy +from google.cloud.compute_v1.types.compute import AutoscalingPolicyCpuUtilization +from google.cloud.compute_v1.types.compute import AutoscalingPolicyCustomMetricUtilization +from google.cloud.compute_v1.types.compute import AutoscalingPolicyLoadBalancingUtilization +from google.cloud.compute_v1.types.compute import AutoscalingPolicyScaleInControl +from google.cloud.compute_v1.types.compute import AutoscalingPolicyScalingSchedule +from google.cloud.compute_v1.types.compute import Backend +from google.cloud.compute_v1.types.compute import BackendBucket +from google.cloud.compute_v1.types.compute import BackendBucketCdnPolicy +from google.cloud.compute_v1.types.compute import BackendBucketCdnPolicyBypassCacheOnRequestHeader +from google.cloud.compute_v1.types.compute import BackendBucketCdnPolicyNegativeCachingPolicy +from google.cloud.compute_v1.types.compute import BackendBucketList +from google.cloud.compute_v1.types.compute import BackendService +from google.cloud.compute_v1.types.compute import BackendServiceAggregatedList +from google.cloud.compute_v1.types.compute 
import BackendServiceCdnPolicy +from google.cloud.compute_v1.types.compute import BackendServiceCdnPolicyBypassCacheOnRequestHeader +from google.cloud.compute_v1.types.compute import BackendServiceCdnPolicyNegativeCachingPolicy +from google.cloud.compute_v1.types.compute import BackendServiceFailoverPolicy +from google.cloud.compute_v1.types.compute import BackendServiceGroupHealth +from google.cloud.compute_v1.types.compute import BackendServiceIAP +from google.cloud.compute_v1.types.compute import BackendServiceList +from google.cloud.compute_v1.types.compute import BackendServiceLogConfig +from google.cloud.compute_v1.types.compute import BackendServiceReference +from google.cloud.compute_v1.types.compute import BackendServicesScopedList +from google.cloud.compute_v1.types.compute import Binding +from google.cloud.compute_v1.types.compute import BulkInsertInstanceRequest +from google.cloud.compute_v1.types.compute import BulkInsertInstanceResource +from google.cloud.compute_v1.types.compute import BulkInsertInstanceResourcePerInstanceProperties +from google.cloud.compute_v1.types.compute import BulkInsertRegionInstanceRequest +from google.cloud.compute_v1.types.compute import CacheInvalidationRule +from google.cloud.compute_v1.types.compute import CacheKeyPolicy +from google.cloud.compute_v1.types.compute import CircuitBreakers +from google.cloud.compute_v1.types.compute import CloneRulesFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import Commitment +from google.cloud.compute_v1.types.compute import CommitmentAggregatedList +from google.cloud.compute_v1.types.compute import CommitmentList +from google.cloud.compute_v1.types.compute import CommitmentsScopedList +from google.cloud.compute_v1.types.compute import Condition +from google.cloud.compute_v1.types.compute import ConfidentialInstanceConfig +from google.cloud.compute_v1.types.compute import ConnectionDraining +from google.cloud.compute_v1.types.compute import 
ConsistentHashLoadBalancerSettings +from google.cloud.compute_v1.types.compute import ConsistentHashLoadBalancerSettingsHttpCookie +from google.cloud.compute_v1.types.compute import CorsPolicy +from google.cloud.compute_v1.types.compute import CreateInstancesInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import CreateInstancesRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import CreateSnapshotDiskRequest +from google.cloud.compute_v1.types.compute import CreateSnapshotRegionDiskRequest +from google.cloud.compute_v1.types.compute import CustomerEncryptionKey +from google.cloud.compute_v1.types.compute import CustomerEncryptionKeyProtectedDisk +from google.cloud.compute_v1.types.compute import Data +from google.cloud.compute_v1.types.compute import DeleteAccessConfigInstanceRequest +from google.cloud.compute_v1.types.compute import DeleteAddressRequest +from google.cloud.compute_v1.types.compute import DeleteAutoscalerRequest +from google.cloud.compute_v1.types.compute import DeleteBackendBucketRequest +from google.cloud.compute_v1.types.compute import DeleteBackendServiceRequest +from google.cloud.compute_v1.types.compute import DeleteDiskRequest +from google.cloud.compute_v1.types.compute import DeleteExternalVpnGatewayRequest +from google.cloud.compute_v1.types.compute import DeleteFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import DeleteFirewallRequest +from google.cloud.compute_v1.types.compute import DeleteForwardingRuleRequest +from google.cloud.compute_v1.types.compute import DeleteGlobalAddressRequest +from google.cloud.compute_v1.types.compute import DeleteGlobalForwardingRuleRequest +from google.cloud.compute_v1.types.compute import DeleteGlobalNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import DeleteGlobalOperationRequest +from google.cloud.compute_v1.types.compute import DeleteGlobalOperationResponse +from google.cloud.compute_v1.types.compute import 
DeleteGlobalOrganizationOperationRequest +from google.cloud.compute_v1.types.compute import DeleteGlobalOrganizationOperationResponse +from google.cloud.compute_v1.types.compute import DeleteGlobalPublicDelegatedPrefixeRequest +from google.cloud.compute_v1.types.compute import DeleteHealthCheckRequest +from google.cloud.compute_v1.types.compute import DeleteImageRequest +from google.cloud.compute_v1.types.compute import DeleteInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import DeleteInstanceGroupRequest +from google.cloud.compute_v1.types.compute import DeleteInstanceRequest +from google.cloud.compute_v1.types.compute import DeleteInstancesInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import DeleteInstancesRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import DeleteInstanceTemplateRequest +from google.cloud.compute_v1.types.compute import DeleteInterconnectAttachmentRequest +from google.cloud.compute_v1.types.compute import DeleteInterconnectRequest +from google.cloud.compute_v1.types.compute import DeleteLicenseRequest +from google.cloud.compute_v1.types.compute import DeleteNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import DeleteNetworkRequest +from google.cloud.compute_v1.types.compute import DeleteNodeGroupRequest +from google.cloud.compute_v1.types.compute import DeleteNodesNodeGroupRequest +from google.cloud.compute_v1.types.compute import DeleteNodeTemplateRequest +from google.cloud.compute_v1.types.compute import DeletePacketMirroringRequest +from google.cloud.compute_v1.types.compute import DeletePerInstanceConfigsInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import DeletePerInstanceConfigsRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import DeletePublicAdvertisedPrefixeRequest +from google.cloud.compute_v1.types.compute import DeletePublicDelegatedPrefixeRequest +from 
google.cloud.compute_v1.types.compute import DeleteRegionAutoscalerRequest +from google.cloud.compute_v1.types.compute import DeleteRegionBackendServiceRequest +from google.cloud.compute_v1.types.compute import DeleteRegionDiskRequest +from google.cloud.compute_v1.types.compute import DeleteRegionHealthCheckRequest +from google.cloud.compute_v1.types.compute import DeleteRegionHealthCheckServiceRequest +from google.cloud.compute_v1.types.compute import DeleteRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import DeleteRegionNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import DeleteRegionNotificationEndpointRequest +from google.cloud.compute_v1.types.compute import DeleteRegionOperationRequest +from google.cloud.compute_v1.types.compute import DeleteRegionOperationResponse +from google.cloud.compute_v1.types.compute import DeleteRegionSslCertificateRequest +from google.cloud.compute_v1.types.compute import DeleteRegionTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import DeleteRegionTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import DeleteRegionUrlMapRequest +from google.cloud.compute_v1.types.compute import DeleteReservationRequest +from google.cloud.compute_v1.types.compute import DeleteResourcePolicyRequest +from google.cloud.compute_v1.types.compute import DeleteRouteRequest +from google.cloud.compute_v1.types.compute import DeleteRouterRequest +from google.cloud.compute_v1.types.compute import DeleteSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import DeleteServiceAttachmentRequest +from google.cloud.compute_v1.types.compute import DeleteSignedUrlKeyBackendBucketRequest +from google.cloud.compute_v1.types.compute import DeleteSignedUrlKeyBackendServiceRequest +from google.cloud.compute_v1.types.compute import DeleteSnapshotRequest +from google.cloud.compute_v1.types.compute import DeleteSslCertificateRequest +from 
google.cloud.compute_v1.types.compute import DeleteSslPolicyRequest +from google.cloud.compute_v1.types.compute import DeleteSubnetworkRequest +from google.cloud.compute_v1.types.compute import DeleteTargetGrpcProxyRequest +from google.cloud.compute_v1.types.compute import DeleteTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import DeleteTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import DeleteTargetInstanceRequest +from google.cloud.compute_v1.types.compute import DeleteTargetPoolRequest +from google.cloud.compute_v1.types.compute import DeleteTargetSslProxyRequest +from google.cloud.compute_v1.types.compute import DeleteTargetTcpProxyRequest +from google.cloud.compute_v1.types.compute import DeleteTargetVpnGatewayRequest +from google.cloud.compute_v1.types.compute import DeleteUrlMapRequest +from google.cloud.compute_v1.types.compute import DeleteVpnGatewayRequest +from google.cloud.compute_v1.types.compute import DeleteVpnTunnelRequest +from google.cloud.compute_v1.types.compute import DeleteZoneOperationRequest +from google.cloud.compute_v1.types.compute import DeleteZoneOperationResponse +from google.cloud.compute_v1.types.compute import Denied +from google.cloud.compute_v1.types.compute import DeprecateImageRequest +from google.cloud.compute_v1.types.compute import DeprecationStatus +from google.cloud.compute_v1.types.compute import DetachDiskInstanceRequest +from google.cloud.compute_v1.types.compute import DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import DetachNetworkEndpointsNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import DisableXpnHostProjectRequest +from google.cloud.compute_v1.types.compute import DisableXpnResourceProjectRequest +from google.cloud.compute_v1.types.compute import Disk +from google.cloud.compute_v1.types.compute import DiskAggregatedList +from google.cloud.compute_v1.types.compute import 
DiskInstantiationConfig +from google.cloud.compute_v1.types.compute import DiskList +from google.cloud.compute_v1.types.compute import DiskMoveRequest +from google.cloud.compute_v1.types.compute import DisksAddResourcePoliciesRequest +from google.cloud.compute_v1.types.compute import DisksRemoveResourcePoliciesRequest +from google.cloud.compute_v1.types.compute import DisksResizeRequest +from google.cloud.compute_v1.types.compute import DisksScopedList +from google.cloud.compute_v1.types.compute import DiskType +from google.cloud.compute_v1.types.compute import DiskTypeAggregatedList +from google.cloud.compute_v1.types.compute import DiskTypeList +from google.cloud.compute_v1.types.compute import DiskTypesScopedList +from google.cloud.compute_v1.types.compute import DisplayDevice +from google.cloud.compute_v1.types.compute import DistributionPolicy +from google.cloud.compute_v1.types.compute import DistributionPolicyZoneConfiguration +from google.cloud.compute_v1.types.compute import Duration +from google.cloud.compute_v1.types.compute import EnableXpnHostProjectRequest +from google.cloud.compute_v1.types.compute import EnableXpnResourceProjectRequest +from google.cloud.compute_v1.types.compute import Error +from google.cloud.compute_v1.types.compute import Errors +from google.cloud.compute_v1.types.compute import ExchangedPeeringRoute +from google.cloud.compute_v1.types.compute import ExchangedPeeringRoutesList +from google.cloud.compute_v1.types.compute import ExpandIpCidrRangeSubnetworkRequest +from google.cloud.compute_v1.types.compute import Expr +from google.cloud.compute_v1.types.compute import ExternalVpnGateway +from google.cloud.compute_v1.types.compute import ExternalVpnGatewayInterface +from google.cloud.compute_v1.types.compute import ExternalVpnGatewayList +from google.cloud.compute_v1.types.compute import FileContentBuffer +from google.cloud.compute_v1.types.compute import Firewall +from google.cloud.compute_v1.types.compute import FirewallList +from 
google.cloud.compute_v1.types.compute import FirewallLogConfig +from google.cloud.compute_v1.types.compute import FirewallPoliciesListAssociationsResponse +from google.cloud.compute_v1.types.compute import FirewallPolicy +from google.cloud.compute_v1.types.compute import FirewallPolicyAssociation +from google.cloud.compute_v1.types.compute import FirewallPolicyList +from google.cloud.compute_v1.types.compute import FirewallPolicyRule +from google.cloud.compute_v1.types.compute import FirewallPolicyRuleMatcher +from google.cloud.compute_v1.types.compute import FirewallPolicyRuleMatcherLayer4Config +from google.cloud.compute_v1.types.compute import FixedOrPercent +from google.cloud.compute_v1.types.compute import ForwardingRule +from google.cloud.compute_v1.types.compute import ForwardingRuleAggregatedList +from google.cloud.compute_v1.types.compute import ForwardingRuleList +from google.cloud.compute_v1.types.compute import ForwardingRuleReference +from google.cloud.compute_v1.types.compute import ForwardingRuleServiceDirectoryRegistration +from google.cloud.compute_v1.types.compute import ForwardingRulesScopedList +from google.cloud.compute_v1.types.compute import GetAcceleratorTypeRequest +from google.cloud.compute_v1.types.compute import GetAddressRequest +from google.cloud.compute_v1.types.compute import GetAssociationFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import GetAutoscalerRequest +from google.cloud.compute_v1.types.compute import GetBackendBucketRequest +from google.cloud.compute_v1.types.compute import GetBackendServiceRequest +from google.cloud.compute_v1.types.compute import GetDiagnosticsInterconnectRequest +from google.cloud.compute_v1.types.compute import GetDiskRequest +from google.cloud.compute_v1.types.compute import GetDiskTypeRequest +from google.cloud.compute_v1.types.compute import GetEffectiveFirewallsInstanceRequest +from google.cloud.compute_v1.types.compute import GetEffectiveFirewallsNetworkRequest +from 
google.cloud.compute_v1.types.compute import GetExternalVpnGatewayRequest +from google.cloud.compute_v1.types.compute import GetFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import GetFirewallRequest +from google.cloud.compute_v1.types.compute import GetForwardingRuleRequest +from google.cloud.compute_v1.types.compute import GetFromFamilyImageRequest +from google.cloud.compute_v1.types.compute import GetGlobalAddressRequest +from google.cloud.compute_v1.types.compute import GetGlobalForwardingRuleRequest +from google.cloud.compute_v1.types.compute import GetGlobalNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import GetGlobalOperationRequest +from google.cloud.compute_v1.types.compute import GetGlobalOrganizationOperationRequest +from google.cloud.compute_v1.types.compute import GetGlobalPublicDelegatedPrefixeRequest +from google.cloud.compute_v1.types.compute import GetGuestAttributesInstanceRequest +from google.cloud.compute_v1.types.compute import GetHealthBackendServiceRequest +from google.cloud.compute_v1.types.compute import GetHealthCheckRequest +from google.cloud.compute_v1.types.compute import GetHealthRegionBackendServiceRequest +from google.cloud.compute_v1.types.compute import GetHealthTargetPoolRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyDiskRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyImageRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyInstanceRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyInstanceTemplateRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyLicenseRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyNodeGroupRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyNodeTemplateRequest +from google.cloud.compute_v1.types.compute import 
GetIamPolicyRegionDiskRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyReservationRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyResourcePolicyRequest +from google.cloud.compute_v1.types.compute import GetIamPolicyServiceAttachmentRequest +from google.cloud.compute_v1.types.compute import GetIamPolicySnapshotRequest +from google.cloud.compute_v1.types.compute import GetIamPolicySubnetworkRequest +from google.cloud.compute_v1.types.compute import GetImageFamilyViewRequest +from google.cloud.compute_v1.types.compute import GetImageRequest +from google.cloud.compute_v1.types.compute import GetInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import GetInstanceGroupRequest +from google.cloud.compute_v1.types.compute import GetInstanceRequest +from google.cloud.compute_v1.types.compute import GetInstanceTemplateRequest +from google.cloud.compute_v1.types.compute import GetInterconnectAttachmentRequest +from google.cloud.compute_v1.types.compute import GetInterconnectLocationRequest +from google.cloud.compute_v1.types.compute import GetInterconnectRequest +from google.cloud.compute_v1.types.compute import GetLicenseCodeRequest +from google.cloud.compute_v1.types.compute import GetLicenseRequest +from google.cloud.compute_v1.types.compute import GetMachineTypeRequest +from google.cloud.compute_v1.types.compute import GetNatMappingInfoRoutersRequest +from google.cloud.compute_v1.types.compute import GetNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import GetNetworkRequest +from google.cloud.compute_v1.types.compute import GetNodeGroupRequest +from google.cloud.compute_v1.types.compute import GetNodeTemplateRequest +from google.cloud.compute_v1.types.compute import GetNodeTypeRequest +from google.cloud.compute_v1.types.compute import GetPacketMirroringRequest +from google.cloud.compute_v1.types.compute import GetProjectRequest +from google.cloud.compute_v1.types.compute import 
GetPublicAdvertisedPrefixeRequest +from google.cloud.compute_v1.types.compute import GetPublicDelegatedPrefixeRequest +from google.cloud.compute_v1.types.compute import GetRegionAutoscalerRequest +from google.cloud.compute_v1.types.compute import GetRegionBackendServiceRequest +from google.cloud.compute_v1.types.compute import GetRegionCommitmentRequest +from google.cloud.compute_v1.types.compute import GetRegionDiskRequest +from google.cloud.compute_v1.types.compute import GetRegionDiskTypeRequest +from google.cloud.compute_v1.types.compute import GetRegionHealthCheckRequest +from google.cloud.compute_v1.types.compute import GetRegionHealthCheckServiceRequest +from google.cloud.compute_v1.types.compute import GetRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import GetRegionInstanceGroupRequest +from google.cloud.compute_v1.types.compute import GetRegionNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import GetRegionNotificationEndpointRequest +from google.cloud.compute_v1.types.compute import GetRegionOperationRequest +from google.cloud.compute_v1.types.compute import GetRegionRequest +from google.cloud.compute_v1.types.compute import GetRegionSslCertificateRequest +from google.cloud.compute_v1.types.compute import GetRegionTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import GetRegionTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import GetRegionUrlMapRequest +from google.cloud.compute_v1.types.compute import GetReservationRequest +from google.cloud.compute_v1.types.compute import GetResourcePolicyRequest +from google.cloud.compute_v1.types.compute import GetRouteRequest +from google.cloud.compute_v1.types.compute import GetRouterRequest +from google.cloud.compute_v1.types.compute import GetRouterStatusRouterRequest +from google.cloud.compute_v1.types.compute import GetRuleFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import 
GetRuleSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import GetScreenshotInstanceRequest +from google.cloud.compute_v1.types.compute import GetSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import GetSerialPortOutputInstanceRequest +from google.cloud.compute_v1.types.compute import GetServiceAttachmentRequest +from google.cloud.compute_v1.types.compute import GetShieldedInstanceIdentityInstanceRequest +from google.cloud.compute_v1.types.compute import GetSnapshotRequest +from google.cloud.compute_v1.types.compute import GetSslCertificateRequest +from google.cloud.compute_v1.types.compute import GetSslPolicyRequest +from google.cloud.compute_v1.types.compute import GetStatusVpnGatewayRequest +from google.cloud.compute_v1.types.compute import GetSubnetworkRequest +from google.cloud.compute_v1.types.compute import GetTargetGrpcProxyRequest +from google.cloud.compute_v1.types.compute import GetTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import GetTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import GetTargetInstanceRequest +from google.cloud.compute_v1.types.compute import GetTargetPoolRequest +from google.cloud.compute_v1.types.compute import GetTargetSslProxyRequest +from google.cloud.compute_v1.types.compute import GetTargetTcpProxyRequest +from google.cloud.compute_v1.types.compute import GetTargetVpnGatewayRequest +from google.cloud.compute_v1.types.compute import GetUrlMapRequest +from google.cloud.compute_v1.types.compute import GetVpnGatewayRequest +from google.cloud.compute_v1.types.compute import GetVpnTunnelRequest +from google.cloud.compute_v1.types.compute import GetXpnHostProjectRequest +from google.cloud.compute_v1.types.compute import GetXpnResourcesProjectsRequest +from google.cloud.compute_v1.types.compute import GetZoneOperationRequest +from google.cloud.compute_v1.types.compute import GetZoneRequest +from google.cloud.compute_v1.types.compute import 
GlobalNetworkEndpointGroupsAttachEndpointsRequest +from google.cloud.compute_v1.types.compute import GlobalNetworkEndpointGroupsDetachEndpointsRequest +from google.cloud.compute_v1.types.compute import GlobalOrganizationSetPolicyRequest +from google.cloud.compute_v1.types.compute import GlobalSetLabelsRequest +from google.cloud.compute_v1.types.compute import GlobalSetPolicyRequest +from google.cloud.compute_v1.types.compute import GRPCHealthCheck +from google.cloud.compute_v1.types.compute import GuestAttributes +from google.cloud.compute_v1.types.compute import GuestAttributesEntry +from google.cloud.compute_v1.types.compute import GuestAttributesValue +from google.cloud.compute_v1.types.compute import GuestOsFeature +from google.cloud.compute_v1.types.compute import HealthCheck +from google.cloud.compute_v1.types.compute import HealthCheckList +from google.cloud.compute_v1.types.compute import HealthCheckLogConfig +from google.cloud.compute_v1.types.compute import HealthCheckReference +from google.cloud.compute_v1.types.compute import HealthChecksAggregatedList +from google.cloud.compute_v1.types.compute import HealthCheckService +from google.cloud.compute_v1.types.compute import HealthCheckServiceReference +from google.cloud.compute_v1.types.compute import HealthCheckServicesList +from google.cloud.compute_v1.types.compute import HealthChecksScopedList +from google.cloud.compute_v1.types.compute import HealthStatus +from google.cloud.compute_v1.types.compute import HealthStatusForNetworkEndpoint +from google.cloud.compute_v1.types.compute import HostRule +from google.cloud.compute_v1.types.compute import HTTP2HealthCheck +from google.cloud.compute_v1.types.compute import HttpFaultAbort +from google.cloud.compute_v1.types.compute import HttpFaultDelay +from google.cloud.compute_v1.types.compute import HttpFaultInjection +from google.cloud.compute_v1.types.compute import HttpHeaderAction +from google.cloud.compute_v1.types.compute import HttpHeaderMatch +from 
google.cloud.compute_v1.types.compute import HttpHeaderOption +from google.cloud.compute_v1.types.compute import HTTPHealthCheck +from google.cloud.compute_v1.types.compute import HttpQueryParameterMatch +from google.cloud.compute_v1.types.compute import HttpRedirectAction +from google.cloud.compute_v1.types.compute import HttpRetryPolicy +from google.cloud.compute_v1.types.compute import HttpRouteAction +from google.cloud.compute_v1.types.compute import HttpRouteRule +from google.cloud.compute_v1.types.compute import HttpRouteRuleMatch +from google.cloud.compute_v1.types.compute import HTTPSHealthCheck +from google.cloud.compute_v1.types.compute import Image +from google.cloud.compute_v1.types.compute import ImageFamilyView +from google.cloud.compute_v1.types.compute import ImageList +from google.cloud.compute_v1.types.compute import InitialStateConfig +from google.cloud.compute_v1.types.compute import InsertAddressRequest +from google.cloud.compute_v1.types.compute import InsertAutoscalerRequest +from google.cloud.compute_v1.types.compute import InsertBackendBucketRequest +from google.cloud.compute_v1.types.compute import InsertBackendServiceRequest +from google.cloud.compute_v1.types.compute import InsertDiskRequest +from google.cloud.compute_v1.types.compute import InsertExternalVpnGatewayRequest +from google.cloud.compute_v1.types.compute import InsertFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import InsertFirewallRequest +from google.cloud.compute_v1.types.compute import InsertForwardingRuleRequest +from google.cloud.compute_v1.types.compute import InsertGlobalAddressRequest +from google.cloud.compute_v1.types.compute import InsertGlobalForwardingRuleRequest +from google.cloud.compute_v1.types.compute import InsertGlobalNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import InsertGlobalPublicDelegatedPrefixeRequest +from google.cloud.compute_v1.types.compute import InsertHealthCheckRequest +from 
google.cloud.compute_v1.types.compute import InsertImageRequest +from google.cloud.compute_v1.types.compute import InsertInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import InsertInstanceGroupRequest +from google.cloud.compute_v1.types.compute import InsertInstanceRequest +from google.cloud.compute_v1.types.compute import InsertInstanceTemplateRequest +from google.cloud.compute_v1.types.compute import InsertInterconnectAttachmentRequest +from google.cloud.compute_v1.types.compute import InsertInterconnectRequest +from google.cloud.compute_v1.types.compute import InsertLicenseRequest +from google.cloud.compute_v1.types.compute import InsertNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import InsertNetworkRequest +from google.cloud.compute_v1.types.compute import InsertNodeGroupRequest +from google.cloud.compute_v1.types.compute import InsertNodeTemplateRequest +from google.cloud.compute_v1.types.compute import InsertPacketMirroringRequest +from google.cloud.compute_v1.types.compute import InsertPublicAdvertisedPrefixeRequest +from google.cloud.compute_v1.types.compute import InsertPublicDelegatedPrefixeRequest +from google.cloud.compute_v1.types.compute import InsertRegionAutoscalerRequest +from google.cloud.compute_v1.types.compute import InsertRegionBackendServiceRequest +from google.cloud.compute_v1.types.compute import InsertRegionCommitmentRequest +from google.cloud.compute_v1.types.compute import InsertRegionDiskRequest +from google.cloud.compute_v1.types.compute import InsertRegionHealthCheckRequest +from google.cloud.compute_v1.types.compute import InsertRegionHealthCheckServiceRequest +from google.cloud.compute_v1.types.compute import InsertRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import InsertRegionNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import InsertRegionNotificationEndpointRequest +from google.cloud.compute_v1.types.compute import 
InsertRegionSslCertificateRequest +from google.cloud.compute_v1.types.compute import InsertRegionTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import InsertRegionTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import InsertRegionUrlMapRequest +from google.cloud.compute_v1.types.compute import InsertReservationRequest +from google.cloud.compute_v1.types.compute import InsertResourcePolicyRequest +from google.cloud.compute_v1.types.compute import InsertRouteRequest +from google.cloud.compute_v1.types.compute import InsertRouterRequest +from google.cloud.compute_v1.types.compute import InsertSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import InsertServiceAttachmentRequest +from google.cloud.compute_v1.types.compute import InsertSslCertificateRequest +from google.cloud.compute_v1.types.compute import InsertSslPolicyRequest +from google.cloud.compute_v1.types.compute import InsertSubnetworkRequest +from google.cloud.compute_v1.types.compute import InsertTargetGrpcProxyRequest +from google.cloud.compute_v1.types.compute import InsertTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import InsertTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import InsertTargetInstanceRequest +from google.cloud.compute_v1.types.compute import InsertTargetPoolRequest +from google.cloud.compute_v1.types.compute import InsertTargetSslProxyRequest +from google.cloud.compute_v1.types.compute import InsertTargetTcpProxyRequest +from google.cloud.compute_v1.types.compute import InsertTargetVpnGatewayRequest +from google.cloud.compute_v1.types.compute import InsertUrlMapRequest +from google.cloud.compute_v1.types.compute import InsertVpnGatewayRequest +from google.cloud.compute_v1.types.compute import InsertVpnTunnelRequest +from google.cloud.compute_v1.types.compute import Instance +from google.cloud.compute_v1.types.compute import InstanceAggregatedList +from 
google.cloud.compute_v1.types.compute import InstanceGroup +from google.cloud.compute_v1.types.compute import InstanceGroupAggregatedList +from google.cloud.compute_v1.types.compute import InstanceGroupList +from google.cloud.compute_v1.types.compute import InstanceGroupManager +from google.cloud.compute_v1.types.compute import InstanceGroupManagerActionsSummary +from google.cloud.compute_v1.types.compute import InstanceGroupManagerAggregatedList +from google.cloud.compute_v1.types.compute import InstanceGroupManagerAutoHealingPolicy +from google.cloud.compute_v1.types.compute import InstanceGroupManagerList +from google.cloud.compute_v1.types.compute import InstanceGroupManagersAbandonInstancesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupManagersApplyUpdatesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupManagersCreateInstancesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupManagersDeleteInstancesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupManagersDeletePerInstanceConfigsReq +from google.cloud.compute_v1.types.compute import InstanceGroupManagersListErrorsResponse +from google.cloud.compute_v1.types.compute import InstanceGroupManagersListManagedInstancesResponse +from google.cloud.compute_v1.types.compute import InstanceGroupManagersListPerInstanceConfigsResp +from google.cloud.compute_v1.types.compute import InstanceGroupManagersPatchPerInstanceConfigsReq +from google.cloud.compute_v1.types.compute import InstanceGroupManagersRecreateInstancesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupManagersScopedList +from google.cloud.compute_v1.types.compute import InstanceGroupManagersSetInstanceTemplateRequest +from google.cloud.compute_v1.types.compute import InstanceGroupManagersSetTargetPoolsRequest +from google.cloud.compute_v1.types.compute import InstanceGroupManagerStatus +from google.cloud.compute_v1.types.compute import 
InstanceGroupManagerStatusStateful +from google.cloud.compute_v1.types.compute import InstanceGroupManagerStatusStatefulPerInstanceConfigs +from google.cloud.compute_v1.types.compute import InstanceGroupManagerStatusVersionTarget +from google.cloud.compute_v1.types.compute import InstanceGroupManagersUpdatePerInstanceConfigsReq +from google.cloud.compute_v1.types.compute import InstanceGroupManagerUpdatePolicy +from google.cloud.compute_v1.types.compute import InstanceGroupManagerVersion +from google.cloud.compute_v1.types.compute import InstanceGroupsAddInstancesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupsListInstances +from google.cloud.compute_v1.types.compute import InstanceGroupsListInstancesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupsRemoveInstancesRequest +from google.cloud.compute_v1.types.compute import InstanceGroupsScopedList +from google.cloud.compute_v1.types.compute import InstanceGroupsSetNamedPortsRequest +from google.cloud.compute_v1.types.compute import InstanceList +from google.cloud.compute_v1.types.compute import InstanceListReferrers +from google.cloud.compute_v1.types.compute import InstanceManagedByIgmError +from google.cloud.compute_v1.types.compute import InstanceManagedByIgmErrorInstanceActionDetails +from google.cloud.compute_v1.types.compute import InstanceManagedByIgmErrorManagedInstanceError +from google.cloud.compute_v1.types.compute import InstanceMoveRequest +from google.cloud.compute_v1.types.compute import InstanceProperties +from google.cloud.compute_v1.types.compute import InstanceReference +from google.cloud.compute_v1.types.compute import InstancesAddResourcePoliciesRequest +from google.cloud.compute_v1.types.compute import InstancesGetEffectiveFirewallsResponse +from google.cloud.compute_v1.types.compute import InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy +from google.cloud.compute_v1.types.compute import InstancesRemoveResourcePoliciesRequest +from 
google.cloud.compute_v1.types.compute import InstancesScopedList +from google.cloud.compute_v1.types.compute import InstancesSetLabelsRequest +from google.cloud.compute_v1.types.compute import InstancesSetMachineResourcesRequest +from google.cloud.compute_v1.types.compute import InstancesSetMachineTypeRequest +from google.cloud.compute_v1.types.compute import InstancesSetMinCpuPlatformRequest +from google.cloud.compute_v1.types.compute import InstancesSetServiceAccountRequest +from google.cloud.compute_v1.types.compute import InstancesStartWithEncryptionKeyRequest +from google.cloud.compute_v1.types.compute import InstanceTemplate +from google.cloud.compute_v1.types.compute import InstanceTemplateList +from google.cloud.compute_v1.types.compute import InstanceWithNamedPorts +from google.cloud.compute_v1.types.compute import Int64RangeMatch +from google.cloud.compute_v1.types.compute import Interconnect +from google.cloud.compute_v1.types.compute import InterconnectAttachment +from google.cloud.compute_v1.types.compute import InterconnectAttachmentAggregatedList +from google.cloud.compute_v1.types.compute import InterconnectAttachmentList +from google.cloud.compute_v1.types.compute import InterconnectAttachmentPartnerMetadata +from google.cloud.compute_v1.types.compute import InterconnectAttachmentPrivateInfo +from google.cloud.compute_v1.types.compute import InterconnectAttachmentsScopedList +from google.cloud.compute_v1.types.compute import InterconnectCircuitInfo +from google.cloud.compute_v1.types.compute import InterconnectDiagnostics +from google.cloud.compute_v1.types.compute import InterconnectDiagnosticsARPEntry +from google.cloud.compute_v1.types.compute import InterconnectDiagnosticsLinkLACPStatus +from google.cloud.compute_v1.types.compute import InterconnectDiagnosticsLinkOpticalPower +from google.cloud.compute_v1.types.compute import InterconnectDiagnosticsLinkStatus +from google.cloud.compute_v1.types.compute import InterconnectList +from 
google.cloud.compute_v1.types.compute import InterconnectLocation +from google.cloud.compute_v1.types.compute import InterconnectLocationList +from google.cloud.compute_v1.types.compute import InterconnectLocationRegionInfo +from google.cloud.compute_v1.types.compute import InterconnectOutageNotification +from google.cloud.compute_v1.types.compute import InterconnectsGetDiagnosticsResponse +from google.cloud.compute_v1.types.compute import InvalidateCacheUrlMapRequest +from google.cloud.compute_v1.types.compute import Items +from google.cloud.compute_v1.types.compute import License +from google.cloud.compute_v1.types.compute import LicenseCode +from google.cloud.compute_v1.types.compute import LicenseCodeLicenseAlias +from google.cloud.compute_v1.types.compute import LicenseResourceCommitment +from google.cloud.compute_v1.types.compute import LicenseResourceRequirements +from google.cloud.compute_v1.types.compute import LicensesListResponse +from google.cloud.compute_v1.types.compute import ListAcceleratorTypesRequest +from google.cloud.compute_v1.types.compute import ListAddressesRequest +from google.cloud.compute_v1.types.compute import ListAssociationsFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import ListAutoscalersRequest +from google.cloud.compute_v1.types.compute import ListAvailableFeaturesSslPoliciesRequest +from google.cloud.compute_v1.types.compute import ListBackendBucketsRequest +from google.cloud.compute_v1.types.compute import ListBackendServicesRequest +from google.cloud.compute_v1.types.compute import ListDisksRequest +from google.cloud.compute_v1.types.compute import ListDiskTypesRequest +from google.cloud.compute_v1.types.compute import ListErrorsInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import ListErrorsRegionInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import ListExternalVpnGatewaysRequest +from google.cloud.compute_v1.types.compute import 
ListFirewallPoliciesRequest +from google.cloud.compute_v1.types.compute import ListFirewallsRequest +from google.cloud.compute_v1.types.compute import ListForwardingRulesRequest +from google.cloud.compute_v1.types.compute import ListGlobalAddressesRequest +from google.cloud.compute_v1.types.compute import ListGlobalForwardingRulesRequest +from google.cloud.compute_v1.types.compute import ListGlobalNetworkEndpointGroupsRequest +from google.cloud.compute_v1.types.compute import ListGlobalOperationsRequest +from google.cloud.compute_v1.types.compute import ListGlobalOrganizationOperationsRequest +from google.cloud.compute_v1.types.compute import ListGlobalPublicDelegatedPrefixesRequest +from google.cloud.compute_v1.types.compute import ListHealthChecksRequest +from google.cloud.compute_v1.types.compute import ListImagesRequest +from google.cloud.compute_v1.types.compute import ListInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import ListInstanceGroupsRequest +from google.cloud.compute_v1.types.compute import ListInstancesInstanceGroupsRequest +from google.cloud.compute_v1.types.compute import ListInstancesRegionInstanceGroupsRequest +from google.cloud.compute_v1.types.compute import ListInstancesRequest +from google.cloud.compute_v1.types.compute import ListInstanceTemplatesRequest +from google.cloud.compute_v1.types.compute import ListInterconnectAttachmentsRequest +from google.cloud.compute_v1.types.compute import ListInterconnectLocationsRequest +from google.cloud.compute_v1.types.compute import ListInterconnectsRequest +from google.cloud.compute_v1.types.compute import ListLicensesRequest +from google.cloud.compute_v1.types.compute import ListMachineTypesRequest +from google.cloud.compute_v1.types.compute import ListManagedInstancesInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import ListManagedInstancesRegionInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import 
ListNetworkEndpointGroupsRequest +from google.cloud.compute_v1.types.compute import ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest +from google.cloud.compute_v1.types.compute import ListNetworkEndpointsNetworkEndpointGroupsRequest +from google.cloud.compute_v1.types.compute import ListNetworksRequest +from google.cloud.compute_v1.types.compute import ListNodeGroupsRequest +from google.cloud.compute_v1.types.compute import ListNodesNodeGroupsRequest +from google.cloud.compute_v1.types.compute import ListNodeTemplatesRequest +from google.cloud.compute_v1.types.compute import ListNodeTypesRequest +from google.cloud.compute_v1.types.compute import ListPacketMirroringsRequest +from google.cloud.compute_v1.types.compute import ListPeeringRoutesNetworksRequest +from google.cloud.compute_v1.types.compute import ListPerInstanceConfigsInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import ListPerInstanceConfigsRegionInstanceGroupManagersRequest +from google.cloud.compute_v1.types.compute import ListPreconfiguredExpressionSetsSecurityPoliciesRequest +from google.cloud.compute_v1.types.compute import ListPublicAdvertisedPrefixesRequest +from google.cloud.compute_v1.types.compute import ListPublicDelegatedPrefixesRequest +from google.cloud.compute_v1.types.compute import ListReferrersInstancesRequest +from google.cloud.compute_v1.types.compute import ListRegionAutoscalersRequest +from google.cloud.compute_v1.types.compute import ListRegionBackendServicesRequest +from google.cloud.compute_v1.types.compute import ListRegionCommitmentsRequest +from google.cloud.compute_v1.types.compute import ListRegionDisksRequest +from google.cloud.compute_v1.types.compute import ListRegionDiskTypesRequest +from google.cloud.compute_v1.types.compute import ListRegionHealthCheckServicesRequest +from google.cloud.compute_v1.types.compute import ListRegionHealthChecksRequest +from google.cloud.compute_v1.types.compute import ListRegionInstanceGroupManagersRequest 
+from google.cloud.compute_v1.types.compute import ListRegionInstanceGroupsRequest +from google.cloud.compute_v1.types.compute import ListRegionNetworkEndpointGroupsRequest +from google.cloud.compute_v1.types.compute import ListRegionNotificationEndpointsRequest +from google.cloud.compute_v1.types.compute import ListRegionOperationsRequest +from google.cloud.compute_v1.types.compute import ListRegionsRequest +from google.cloud.compute_v1.types.compute import ListRegionSslCertificatesRequest +from google.cloud.compute_v1.types.compute import ListRegionTargetHttpProxiesRequest +from google.cloud.compute_v1.types.compute import ListRegionTargetHttpsProxiesRequest +from google.cloud.compute_v1.types.compute import ListRegionUrlMapsRequest +from google.cloud.compute_v1.types.compute import ListReservationsRequest +from google.cloud.compute_v1.types.compute import ListResourcePoliciesRequest +from google.cloud.compute_v1.types.compute import ListRoutersRequest +from google.cloud.compute_v1.types.compute import ListRoutesRequest +from google.cloud.compute_v1.types.compute import ListSecurityPoliciesRequest +from google.cloud.compute_v1.types.compute import ListServiceAttachmentsRequest +from google.cloud.compute_v1.types.compute import ListSnapshotsRequest +from google.cloud.compute_v1.types.compute import ListSslCertificatesRequest +from google.cloud.compute_v1.types.compute import ListSslPoliciesRequest +from google.cloud.compute_v1.types.compute import ListSubnetworksRequest +from google.cloud.compute_v1.types.compute import ListTargetGrpcProxiesRequest +from google.cloud.compute_v1.types.compute import ListTargetHttpProxiesRequest +from google.cloud.compute_v1.types.compute import ListTargetHttpsProxiesRequest +from google.cloud.compute_v1.types.compute import ListTargetInstancesRequest +from google.cloud.compute_v1.types.compute import ListTargetPoolsRequest +from google.cloud.compute_v1.types.compute import ListTargetSslProxiesRequest +from 
google.cloud.compute_v1.types.compute import ListTargetTcpProxiesRequest +from google.cloud.compute_v1.types.compute import ListTargetVpnGatewaysRequest +from google.cloud.compute_v1.types.compute import ListUrlMapsRequest +from google.cloud.compute_v1.types.compute import ListUsableSubnetworksRequest +from google.cloud.compute_v1.types.compute import ListVpnGatewaysRequest +from google.cloud.compute_v1.types.compute import ListVpnTunnelsRequest +from google.cloud.compute_v1.types.compute import ListXpnHostsProjectsRequest +from google.cloud.compute_v1.types.compute import ListZoneOperationsRequest +from google.cloud.compute_v1.types.compute import ListZonesRequest +from google.cloud.compute_v1.types.compute import LocalDisk +from google.cloud.compute_v1.types.compute import LocationPolicy +from google.cloud.compute_v1.types.compute import LocationPolicyLocation +from google.cloud.compute_v1.types.compute import LogConfig +from google.cloud.compute_v1.types.compute import LogConfigCloudAuditOptions +from google.cloud.compute_v1.types.compute import LogConfigCounterOptions +from google.cloud.compute_v1.types.compute import LogConfigCounterOptionsCustomField +from google.cloud.compute_v1.types.compute import LogConfigDataAccessOptions +from google.cloud.compute_v1.types.compute import MachineType +from google.cloud.compute_v1.types.compute import MachineTypeAggregatedList +from google.cloud.compute_v1.types.compute import MachineTypeList +from google.cloud.compute_v1.types.compute import MachineTypesScopedList +from google.cloud.compute_v1.types.compute import ManagedInstance +from google.cloud.compute_v1.types.compute import ManagedInstanceInstanceHealth +from google.cloud.compute_v1.types.compute import ManagedInstanceLastAttempt +from google.cloud.compute_v1.types.compute import ManagedInstanceVersion +from google.cloud.compute_v1.types.compute import Metadata +from google.cloud.compute_v1.types.compute import MetadataFilter +from 
google.cloud.compute_v1.types.compute import MetadataFilterLabelMatch +from google.cloud.compute_v1.types.compute import MoveDiskProjectRequest +from google.cloud.compute_v1.types.compute import MoveFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import MoveInstanceProjectRequest +from google.cloud.compute_v1.types.compute import NamedPort +from google.cloud.compute_v1.types.compute import Network +from google.cloud.compute_v1.types.compute import NetworkEndpoint +from google.cloud.compute_v1.types.compute import NetworkEndpointGroup +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupAggregatedList +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupAppEngine +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupCloudFunction +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupCloudRun +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupList +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupsAttachEndpointsRequest +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupsDetachEndpointsRequest +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupsListEndpointsRequest +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupsListNetworkEndpoints +from google.cloud.compute_v1.types.compute import NetworkEndpointGroupsScopedList +from google.cloud.compute_v1.types.compute import NetworkEndpointWithHealthStatus +from google.cloud.compute_v1.types.compute import NetworkInterface +from google.cloud.compute_v1.types.compute import NetworkList +from google.cloud.compute_v1.types.compute import NetworkPeering +from google.cloud.compute_v1.types.compute import NetworkRoutingConfig +from google.cloud.compute_v1.types.compute import NetworksAddPeeringRequest +from google.cloud.compute_v1.types.compute import NetworksGetEffectiveFirewallsResponse +from google.cloud.compute_v1.types.compute import 
NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy +from google.cloud.compute_v1.types.compute import NetworksRemovePeeringRequest +from google.cloud.compute_v1.types.compute import NetworksUpdatePeeringRequest +from google.cloud.compute_v1.types.compute import NodeGroup +from google.cloud.compute_v1.types.compute import NodeGroupAggregatedList +from google.cloud.compute_v1.types.compute import NodeGroupAutoscalingPolicy +from google.cloud.compute_v1.types.compute import NodeGroupList +from google.cloud.compute_v1.types.compute import NodeGroupMaintenanceWindow +from google.cloud.compute_v1.types.compute import NodeGroupNode +from google.cloud.compute_v1.types.compute import NodeGroupsAddNodesRequest +from google.cloud.compute_v1.types.compute import NodeGroupsDeleteNodesRequest +from google.cloud.compute_v1.types.compute import NodeGroupsListNodes +from google.cloud.compute_v1.types.compute import NodeGroupsScopedList +from google.cloud.compute_v1.types.compute import NodeGroupsSetNodeTemplateRequest +from google.cloud.compute_v1.types.compute import NodeTemplate +from google.cloud.compute_v1.types.compute import NodeTemplateAggregatedList +from google.cloud.compute_v1.types.compute import NodeTemplateList +from google.cloud.compute_v1.types.compute import NodeTemplateNodeTypeFlexibility +from google.cloud.compute_v1.types.compute import NodeTemplatesScopedList +from google.cloud.compute_v1.types.compute import NodeType +from google.cloud.compute_v1.types.compute import NodeTypeAggregatedList +from google.cloud.compute_v1.types.compute import NodeTypeList +from google.cloud.compute_v1.types.compute import NodeTypesScopedList +from google.cloud.compute_v1.types.compute import NotificationEndpoint +from google.cloud.compute_v1.types.compute import NotificationEndpointGrpcSettings +from google.cloud.compute_v1.types.compute import NotificationEndpointList +from google.cloud.compute_v1.types.compute import Operation +from 
google.cloud.compute_v1.types.compute import OperationAggregatedList +from google.cloud.compute_v1.types.compute import OperationList +from google.cloud.compute_v1.types.compute import OperationsScopedList +from google.cloud.compute_v1.types.compute import OutlierDetection +from google.cloud.compute_v1.types.compute import PacketMirroring +from google.cloud.compute_v1.types.compute import PacketMirroringAggregatedList +from google.cloud.compute_v1.types.compute import PacketMirroringFilter +from google.cloud.compute_v1.types.compute import PacketMirroringForwardingRuleInfo +from google.cloud.compute_v1.types.compute import PacketMirroringList +from google.cloud.compute_v1.types.compute import PacketMirroringMirroredResourceInfo +from google.cloud.compute_v1.types.compute import PacketMirroringMirroredResourceInfoInstanceInfo +from google.cloud.compute_v1.types.compute import PacketMirroringMirroredResourceInfoSubnetInfo +from google.cloud.compute_v1.types.compute import PacketMirroringNetworkInfo +from google.cloud.compute_v1.types.compute import PacketMirroringsScopedList +from google.cloud.compute_v1.types.compute import PatchAutoscalerRequest +from google.cloud.compute_v1.types.compute import PatchBackendBucketRequest +from google.cloud.compute_v1.types.compute import PatchBackendServiceRequest +from google.cloud.compute_v1.types.compute import PatchFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import PatchFirewallRequest +from google.cloud.compute_v1.types.compute import PatchForwardingRuleRequest +from google.cloud.compute_v1.types.compute import PatchGlobalForwardingRuleRequest +from google.cloud.compute_v1.types.compute import PatchGlobalPublicDelegatedPrefixeRequest +from google.cloud.compute_v1.types.compute import PatchHealthCheckRequest +from google.cloud.compute_v1.types.compute import PatchImageRequest +from google.cloud.compute_v1.types.compute import PatchInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute 
import PatchInterconnectAttachmentRequest +from google.cloud.compute_v1.types.compute import PatchInterconnectRequest +from google.cloud.compute_v1.types.compute import PatchNetworkRequest +from google.cloud.compute_v1.types.compute import PatchNodeGroupRequest +from google.cloud.compute_v1.types.compute import PatchPacketMirroringRequest +from google.cloud.compute_v1.types.compute import PatchPerInstanceConfigsInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import PatchPerInstanceConfigsRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import PatchPublicAdvertisedPrefixeRequest +from google.cloud.compute_v1.types.compute import PatchPublicDelegatedPrefixeRequest +from google.cloud.compute_v1.types.compute import PatchRegionAutoscalerRequest +from google.cloud.compute_v1.types.compute import PatchRegionBackendServiceRequest +from google.cloud.compute_v1.types.compute import PatchRegionHealthCheckRequest +from google.cloud.compute_v1.types.compute import PatchRegionHealthCheckServiceRequest +from google.cloud.compute_v1.types.compute import PatchRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import PatchRegionUrlMapRequest +from google.cloud.compute_v1.types.compute import PatchRouterRequest +from google.cloud.compute_v1.types.compute import PatchRuleFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import PatchRuleSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import PatchSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import PatchServiceAttachmentRequest +from google.cloud.compute_v1.types.compute import PatchSslPolicyRequest +from google.cloud.compute_v1.types.compute import PatchSubnetworkRequest +from google.cloud.compute_v1.types.compute import PatchTargetGrpcProxyRequest +from google.cloud.compute_v1.types.compute import PatchTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import 
PatchTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import PatchUrlMapRequest +from google.cloud.compute_v1.types.compute import PathMatcher +from google.cloud.compute_v1.types.compute import PathRule +from google.cloud.compute_v1.types.compute import PerInstanceConfig +from google.cloud.compute_v1.types.compute import Policy +from google.cloud.compute_v1.types.compute import PreconfiguredWafSet +from google.cloud.compute_v1.types.compute import PreservedState +from google.cloud.compute_v1.types.compute import PreservedStatePreservedDisk +from google.cloud.compute_v1.types.compute import PreviewRouterRequest +from google.cloud.compute_v1.types.compute import Project +from google.cloud.compute_v1.types.compute import ProjectsDisableXpnResourceRequest +from google.cloud.compute_v1.types.compute import ProjectsEnableXpnResourceRequest +from google.cloud.compute_v1.types.compute import ProjectsGetXpnResources +from google.cloud.compute_v1.types.compute import ProjectsListXpnHostsRequest +from google.cloud.compute_v1.types.compute import ProjectsSetDefaultNetworkTierRequest +from google.cloud.compute_v1.types.compute import PublicAdvertisedPrefix +from google.cloud.compute_v1.types.compute import PublicAdvertisedPrefixList +from google.cloud.compute_v1.types.compute import PublicAdvertisedPrefixPublicDelegatedPrefix +from google.cloud.compute_v1.types.compute import PublicDelegatedPrefix +from google.cloud.compute_v1.types.compute import PublicDelegatedPrefixAggregatedList +from google.cloud.compute_v1.types.compute import PublicDelegatedPrefixesScopedList +from google.cloud.compute_v1.types.compute import PublicDelegatedPrefixList +from google.cloud.compute_v1.types.compute import PublicDelegatedPrefixPublicDelegatedSubPrefix +from google.cloud.compute_v1.types.compute import Quota +from google.cloud.compute_v1.types.compute import RawDisk +from google.cloud.compute_v1.types.compute import RecreateInstancesInstanceGroupManagerRequest +from 
google.cloud.compute_v1.types.compute import RecreateInstancesRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import Reference +from google.cloud.compute_v1.types.compute import Region +from google.cloud.compute_v1.types.compute import RegionAutoscalerList +from google.cloud.compute_v1.types.compute import RegionDisksAddResourcePoliciesRequest +from google.cloud.compute_v1.types.compute import RegionDisksRemoveResourcePoliciesRequest +from google.cloud.compute_v1.types.compute import RegionDisksResizeRequest +from google.cloud.compute_v1.types.compute import RegionDiskTypeList +from google.cloud.compute_v1.types.compute import RegionInstanceGroupList +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagerDeleteInstanceConfigReq +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagerList +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagerPatchInstanceConfigReq +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersAbandonInstancesRequest +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersApplyUpdatesRequest +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersCreateInstancesRequest +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersDeleteInstancesRequest +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersListErrorsResponse +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersListInstanceConfigsResp +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersListInstancesResponse +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersRecreateRequest +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersSetTargetPoolsRequest +from google.cloud.compute_v1.types.compute import RegionInstanceGroupManagersSetTemplateRequest +from 
google.cloud.compute_v1.types.compute import RegionInstanceGroupManagerUpdateInstanceConfigReq +from google.cloud.compute_v1.types.compute import RegionInstanceGroupsListInstances +from google.cloud.compute_v1.types.compute import RegionInstanceGroupsListInstancesRequest +from google.cloud.compute_v1.types.compute import RegionInstanceGroupsSetNamedPortsRequest +from google.cloud.compute_v1.types.compute import RegionList +from google.cloud.compute_v1.types.compute import RegionSetLabelsRequest +from google.cloud.compute_v1.types.compute import RegionSetPolicyRequest +from google.cloud.compute_v1.types.compute import RegionTargetHttpsProxiesSetSslCertificatesRequest +from google.cloud.compute_v1.types.compute import RegionUrlMapsValidateRequest +from google.cloud.compute_v1.types.compute import RemoveAssociationFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import RemoveHealthCheckTargetPoolRequest +from google.cloud.compute_v1.types.compute import RemoveInstancesInstanceGroupRequest +from google.cloud.compute_v1.types.compute import RemoveInstanceTargetPoolRequest +from google.cloud.compute_v1.types.compute import RemovePeeringNetworkRequest +from google.cloud.compute_v1.types.compute import RemoveResourcePoliciesDiskRequest +from google.cloud.compute_v1.types.compute import RemoveResourcePoliciesInstanceRequest +from google.cloud.compute_v1.types.compute import RemoveResourcePoliciesRegionDiskRequest +from google.cloud.compute_v1.types.compute import RemoveRuleFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import RemoveRuleSecurityPolicyRequest +from google.cloud.compute_v1.types.compute import RequestMirrorPolicy +from google.cloud.compute_v1.types.compute import Reservation +from google.cloud.compute_v1.types.compute import ReservationAffinity +from google.cloud.compute_v1.types.compute import ReservationAggregatedList +from google.cloud.compute_v1.types.compute import ReservationList +from 
google.cloud.compute_v1.types.compute import ReservationsResizeRequest +from google.cloud.compute_v1.types.compute import ReservationsScopedList +from google.cloud.compute_v1.types.compute import ResetInstanceRequest +from google.cloud.compute_v1.types.compute import ResizeDiskRequest +from google.cloud.compute_v1.types.compute import ResizeInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import ResizeRegionDiskRequest +from google.cloud.compute_v1.types.compute import ResizeRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import ResizeReservationRequest +from google.cloud.compute_v1.types.compute import ResourceCommitment +from google.cloud.compute_v1.types.compute import ResourceGroupReference +from google.cloud.compute_v1.types.compute import ResourcePoliciesScopedList +from google.cloud.compute_v1.types.compute import ResourcePolicy +from google.cloud.compute_v1.types.compute import ResourcePolicyAggregatedList +from google.cloud.compute_v1.types.compute import ResourcePolicyDailyCycle +from google.cloud.compute_v1.types.compute import ResourcePolicyGroupPlacementPolicy +from google.cloud.compute_v1.types.compute import ResourcePolicyHourlyCycle +from google.cloud.compute_v1.types.compute import ResourcePolicyInstanceSchedulePolicy +from google.cloud.compute_v1.types.compute import ResourcePolicyInstanceSchedulePolicySchedule +from google.cloud.compute_v1.types.compute import ResourcePolicyList +from google.cloud.compute_v1.types.compute import ResourcePolicyResourceStatus +from google.cloud.compute_v1.types.compute import ResourcePolicyResourceStatusInstanceSchedulePolicyStatus +from google.cloud.compute_v1.types.compute import ResourcePolicySnapshotSchedulePolicy +from google.cloud.compute_v1.types.compute import ResourcePolicySnapshotSchedulePolicyRetentionPolicy +from google.cloud.compute_v1.types.compute import ResourcePolicySnapshotSchedulePolicySchedule +from google.cloud.compute_v1.types.compute import 
ResourcePolicySnapshotSchedulePolicySnapshotProperties +from google.cloud.compute_v1.types.compute import ResourcePolicyWeeklyCycle +from google.cloud.compute_v1.types.compute import ResourcePolicyWeeklyCycleDayOfWeek +from google.cloud.compute_v1.types.compute import Route +from google.cloud.compute_v1.types.compute import RouteAsPath +from google.cloud.compute_v1.types.compute import RouteList +from google.cloud.compute_v1.types.compute import Router +from google.cloud.compute_v1.types.compute import RouterAdvertisedIpRange +from google.cloud.compute_v1.types.compute import RouterAggregatedList +from google.cloud.compute_v1.types.compute import RouterBgp +from google.cloud.compute_v1.types.compute import RouterBgpPeer +from google.cloud.compute_v1.types.compute import RouterBgpPeerBfd +from google.cloud.compute_v1.types.compute import RouterInterface +from google.cloud.compute_v1.types.compute import RouterList +from google.cloud.compute_v1.types.compute import RouterNat +from google.cloud.compute_v1.types.compute import RouterNatLogConfig +from google.cloud.compute_v1.types.compute import RouterNatRule +from google.cloud.compute_v1.types.compute import RouterNatRuleAction +from google.cloud.compute_v1.types.compute import RouterNatSubnetworkToNat +from google.cloud.compute_v1.types.compute import RoutersPreviewResponse +from google.cloud.compute_v1.types.compute import RoutersScopedList +from google.cloud.compute_v1.types.compute import RouterStatus +from google.cloud.compute_v1.types.compute import RouterStatusBgpPeerStatus +from google.cloud.compute_v1.types.compute import RouterStatusNatStatus +from google.cloud.compute_v1.types.compute import RouterStatusNatStatusNatRuleStatus +from google.cloud.compute_v1.types.compute import RouterStatusResponse +from google.cloud.compute_v1.types.compute import Rule +from google.cloud.compute_v1.types.compute import ScalingScheduleStatus +from google.cloud.compute_v1.types.compute import Scheduling +from 
google.cloud.compute_v1.types.compute import SchedulingNodeAffinity +from google.cloud.compute_v1.types.compute import ScratchDisks +from google.cloud.compute_v1.types.compute import Screenshot +from google.cloud.compute_v1.types.compute import SecurityPoliciesListPreconfiguredExpressionSetsResponse +from google.cloud.compute_v1.types.compute import SecurityPoliciesWafConfig +from google.cloud.compute_v1.types.compute import SecurityPolicy +from google.cloud.compute_v1.types.compute import SecurityPolicyAdaptiveProtectionConfig +from google.cloud.compute_v1.types.compute import SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig +from google.cloud.compute_v1.types.compute import SecurityPolicyAdvancedOptionsConfig +from google.cloud.compute_v1.types.compute import SecurityPolicyList +from google.cloud.compute_v1.types.compute import SecurityPolicyReference +from google.cloud.compute_v1.types.compute import SecurityPolicyRule +from google.cloud.compute_v1.types.compute import SecurityPolicyRuleMatcher +from google.cloud.compute_v1.types.compute import SecurityPolicyRuleMatcherConfig +from google.cloud.compute_v1.types.compute import SecuritySettings +from google.cloud.compute_v1.types.compute import SendDiagnosticInterruptInstanceRequest +from google.cloud.compute_v1.types.compute import SendDiagnosticInterruptInstanceResponse +from google.cloud.compute_v1.types.compute import SerialPortOutput +from google.cloud.compute_v1.types.compute import ServerBinding +from google.cloud.compute_v1.types.compute import ServiceAccount +from google.cloud.compute_v1.types.compute import ServiceAttachment +from google.cloud.compute_v1.types.compute import ServiceAttachmentAggregatedList +from google.cloud.compute_v1.types.compute import ServiceAttachmentConnectedEndpoint +from google.cloud.compute_v1.types.compute import ServiceAttachmentConsumerProjectLimit +from google.cloud.compute_v1.types.compute import ServiceAttachmentList +from 
google.cloud.compute_v1.types.compute import ServiceAttachmentsScopedList +from google.cloud.compute_v1.types.compute import SetBackendServiceTargetSslProxyRequest +from google.cloud.compute_v1.types.compute import SetBackendServiceTargetTcpProxyRequest +from google.cloud.compute_v1.types.compute import SetBackupTargetPoolRequest +from google.cloud.compute_v1.types.compute import SetCommonInstanceMetadataProjectRequest +from google.cloud.compute_v1.types.compute import SetDefaultNetworkTierProjectRequest +from google.cloud.compute_v1.types.compute import SetDeletionProtectionInstanceRequest +from google.cloud.compute_v1.types.compute import SetDiskAutoDeleteInstanceRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyDiskRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyImageRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyInstanceRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyInstanceTemplateRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyLicenseRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyNodeGroupRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyNodeTemplateRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyRegionDiskRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyReservationRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyResourcePolicyRequest +from google.cloud.compute_v1.types.compute import SetIamPolicyServiceAttachmentRequest +from google.cloud.compute_v1.types.compute import SetIamPolicySnapshotRequest +from google.cloud.compute_v1.types.compute import SetIamPolicySubnetworkRequest +from google.cloud.compute_v1.types.compute import SetInstanceTemplateInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import 
SetInstanceTemplateRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import SetLabelsDiskRequest +from google.cloud.compute_v1.types.compute import SetLabelsExternalVpnGatewayRequest +from google.cloud.compute_v1.types.compute import SetLabelsForwardingRuleRequest +from google.cloud.compute_v1.types.compute import SetLabelsGlobalForwardingRuleRequest +from google.cloud.compute_v1.types.compute import SetLabelsImageRequest +from google.cloud.compute_v1.types.compute import SetLabelsInstanceRequest +from google.cloud.compute_v1.types.compute import SetLabelsRegionDiskRequest +from google.cloud.compute_v1.types.compute import SetLabelsSnapshotRequest +from google.cloud.compute_v1.types.compute import SetLabelsVpnGatewayRequest +from google.cloud.compute_v1.types.compute import SetMachineResourcesInstanceRequest +from google.cloud.compute_v1.types.compute import SetMachineTypeInstanceRequest +from google.cloud.compute_v1.types.compute import SetMetadataInstanceRequest +from google.cloud.compute_v1.types.compute import SetMinCpuPlatformInstanceRequest +from google.cloud.compute_v1.types.compute import SetNamedPortsInstanceGroupRequest +from google.cloud.compute_v1.types.compute import SetNamedPortsRegionInstanceGroupRequest +from google.cloud.compute_v1.types.compute import SetNodeTemplateNodeGroupRequest +from google.cloud.compute_v1.types.compute import SetPrivateIpGoogleAccessSubnetworkRequest +from google.cloud.compute_v1.types.compute import SetProxyHeaderTargetSslProxyRequest +from google.cloud.compute_v1.types.compute import SetProxyHeaderTargetTcpProxyRequest +from google.cloud.compute_v1.types.compute import SetQuicOverrideTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import SetSchedulingInstanceRequest +from google.cloud.compute_v1.types.compute import SetSecurityPolicyBackendServiceRequest +from google.cloud.compute_v1.types.compute import SetServiceAccountInstanceRequest +from 
google.cloud.compute_v1.types.compute import SetShieldedInstanceIntegrityPolicyInstanceRequest +from google.cloud.compute_v1.types.compute import SetSslCertificatesRegionTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import SetSslCertificatesTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import SetSslCertificatesTargetSslProxyRequest +from google.cloud.compute_v1.types.compute import SetSslPolicyTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import SetSslPolicyTargetSslProxyRequest +from google.cloud.compute_v1.types.compute import SetTagsInstanceRequest +from google.cloud.compute_v1.types.compute import SetTargetForwardingRuleRequest +from google.cloud.compute_v1.types.compute import SetTargetGlobalForwardingRuleRequest +from google.cloud.compute_v1.types.compute import SetTargetPoolsInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import SetTargetPoolsRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import SetUrlMapRegionTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import SetUrlMapRegionTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import SetUrlMapTargetHttpProxyRequest +from google.cloud.compute_v1.types.compute import SetUrlMapTargetHttpsProxyRequest +from google.cloud.compute_v1.types.compute import SetUsageExportBucketProjectRequest +from google.cloud.compute_v1.types.compute import ShieldedInstanceConfig +from google.cloud.compute_v1.types.compute import ShieldedInstanceIdentity +from google.cloud.compute_v1.types.compute import ShieldedInstanceIdentityEntry +from google.cloud.compute_v1.types.compute import ShieldedInstanceIntegrityPolicy +from google.cloud.compute_v1.types.compute import SignedUrlKey +from google.cloud.compute_v1.types.compute import SimulateMaintenanceEventInstanceRequest +from google.cloud.compute_v1.types.compute import Snapshot +from google.cloud.compute_v1.types.compute import 
SnapshotList +from google.cloud.compute_v1.types.compute import SourceInstanceParams +from google.cloud.compute_v1.types.compute import SslCertificate +from google.cloud.compute_v1.types.compute import SslCertificateAggregatedList +from google.cloud.compute_v1.types.compute import SslCertificateList +from google.cloud.compute_v1.types.compute import SslCertificateManagedSslCertificate +from google.cloud.compute_v1.types.compute import SslCertificateSelfManagedSslCertificate +from google.cloud.compute_v1.types.compute import SslCertificatesScopedList +from google.cloud.compute_v1.types.compute import SSLHealthCheck +from google.cloud.compute_v1.types.compute import SslPoliciesList +from google.cloud.compute_v1.types.compute import SslPoliciesListAvailableFeaturesResponse +from google.cloud.compute_v1.types.compute import SslPolicy +from google.cloud.compute_v1.types.compute import SslPolicyReference +from google.cloud.compute_v1.types.compute import StartInstanceRequest +from google.cloud.compute_v1.types.compute import StartWithEncryptionKeyInstanceRequest +from google.cloud.compute_v1.types.compute import StatefulPolicy +from google.cloud.compute_v1.types.compute import StatefulPolicyPreservedState +from google.cloud.compute_v1.types.compute import StatefulPolicyPreservedStateDiskDevice +from google.cloud.compute_v1.types.compute import StopInstanceRequest +from google.cloud.compute_v1.types.compute import Subnetwork +from google.cloud.compute_v1.types.compute import SubnetworkAggregatedList +from google.cloud.compute_v1.types.compute import SubnetworkList +from google.cloud.compute_v1.types.compute import SubnetworkLogConfig +from google.cloud.compute_v1.types.compute import SubnetworkSecondaryRange +from google.cloud.compute_v1.types.compute import SubnetworksExpandIpCidrRangeRequest +from google.cloud.compute_v1.types.compute import SubnetworksScopedList +from google.cloud.compute_v1.types.compute import SubnetworksSetPrivateIpGoogleAccessRequest +from 
google.cloud.compute_v1.types.compute import Subsetting +from google.cloud.compute_v1.types.compute import SwitchToCustomModeNetworkRequest +from google.cloud.compute_v1.types.compute import Tags +from google.cloud.compute_v1.types.compute import TargetGrpcProxy +from google.cloud.compute_v1.types.compute import TargetGrpcProxyList +from google.cloud.compute_v1.types.compute import TargetHttpProxiesScopedList +from google.cloud.compute_v1.types.compute import TargetHttpProxy +from google.cloud.compute_v1.types.compute import TargetHttpProxyAggregatedList +from google.cloud.compute_v1.types.compute import TargetHttpProxyList +from google.cloud.compute_v1.types.compute import TargetHttpsProxiesScopedList +from google.cloud.compute_v1.types.compute import TargetHttpsProxiesSetQuicOverrideRequest +from google.cloud.compute_v1.types.compute import TargetHttpsProxiesSetSslCertificatesRequest +from google.cloud.compute_v1.types.compute import TargetHttpsProxy +from google.cloud.compute_v1.types.compute import TargetHttpsProxyAggregatedList +from google.cloud.compute_v1.types.compute import TargetHttpsProxyList +from google.cloud.compute_v1.types.compute import TargetInstance +from google.cloud.compute_v1.types.compute import TargetInstanceAggregatedList +from google.cloud.compute_v1.types.compute import TargetInstanceList +from google.cloud.compute_v1.types.compute import TargetInstancesScopedList +from google.cloud.compute_v1.types.compute import TargetPool +from google.cloud.compute_v1.types.compute import TargetPoolAggregatedList +from google.cloud.compute_v1.types.compute import TargetPoolInstanceHealth +from google.cloud.compute_v1.types.compute import TargetPoolList +from google.cloud.compute_v1.types.compute import TargetPoolsAddHealthCheckRequest +from google.cloud.compute_v1.types.compute import TargetPoolsAddInstanceRequest +from google.cloud.compute_v1.types.compute import TargetPoolsRemoveHealthCheckRequest +from google.cloud.compute_v1.types.compute import 
TargetPoolsRemoveInstanceRequest +from google.cloud.compute_v1.types.compute import TargetPoolsScopedList +from google.cloud.compute_v1.types.compute import TargetReference +from google.cloud.compute_v1.types.compute import TargetSslProxiesSetBackendServiceRequest +from google.cloud.compute_v1.types.compute import TargetSslProxiesSetProxyHeaderRequest +from google.cloud.compute_v1.types.compute import TargetSslProxiesSetSslCertificatesRequest +from google.cloud.compute_v1.types.compute import TargetSslProxy +from google.cloud.compute_v1.types.compute import TargetSslProxyList +from google.cloud.compute_v1.types.compute import TargetTcpProxiesSetBackendServiceRequest +from google.cloud.compute_v1.types.compute import TargetTcpProxiesSetProxyHeaderRequest +from google.cloud.compute_v1.types.compute import TargetTcpProxy +from google.cloud.compute_v1.types.compute import TargetTcpProxyList +from google.cloud.compute_v1.types.compute import TargetVpnGateway +from google.cloud.compute_v1.types.compute import TargetVpnGatewayAggregatedList +from google.cloud.compute_v1.types.compute import TargetVpnGatewayList +from google.cloud.compute_v1.types.compute import TargetVpnGatewaysScopedList +from google.cloud.compute_v1.types.compute import TCPHealthCheck +from google.cloud.compute_v1.types.compute import TestFailure +from google.cloud.compute_v1.types.compute import TestIamPermissionsDiskRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsExternalVpnGatewayRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsFirewallPolicyRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsImageRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsInstanceRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsInstanceTemplateRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsLicenseCodeRequest +from google.cloud.compute_v1.types.compute import 
TestIamPermissionsLicenseRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsNetworkEndpointGroupRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsNodeGroupRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsNodeTemplateRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsPacketMirroringRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsRegionDiskRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsReservationRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsResourcePolicyRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsServiceAttachmentRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsSnapshotRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsSubnetworkRequest +from google.cloud.compute_v1.types.compute import TestIamPermissionsVpnGatewayRequest +from google.cloud.compute_v1.types.compute import TestPermissionsRequest +from google.cloud.compute_v1.types.compute import TestPermissionsResponse +from google.cloud.compute_v1.types.compute import Uint128 +from google.cloud.compute_v1.types.compute import UpdateAccessConfigInstanceRequest +from google.cloud.compute_v1.types.compute import UpdateAutoscalerRequest +from google.cloud.compute_v1.types.compute import UpdateBackendBucketRequest +from google.cloud.compute_v1.types.compute import UpdateBackendServiceRequest +from google.cloud.compute_v1.types.compute import UpdateDisplayDeviceInstanceRequest +from google.cloud.compute_v1.types.compute import UpdateFirewallRequest +from google.cloud.compute_v1.types.compute import UpdateHealthCheckRequest +from google.cloud.compute_v1.types.compute import UpdateInstanceRequest +from google.cloud.compute_v1.types.compute import UpdateNetworkInterfaceInstanceRequest +from google.cloud.compute_v1.types.compute import 
UpdatePeeringNetworkRequest +from google.cloud.compute_v1.types.compute import UpdatePerInstanceConfigsInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest +from google.cloud.compute_v1.types.compute import UpdateRegionAutoscalerRequest +from google.cloud.compute_v1.types.compute import UpdateRegionBackendServiceRequest +from google.cloud.compute_v1.types.compute import UpdateRegionHealthCheckRequest +from google.cloud.compute_v1.types.compute import UpdateRegionUrlMapRequest +from google.cloud.compute_v1.types.compute import UpdateRouterRequest +from google.cloud.compute_v1.types.compute import UpdateShieldedInstanceConfigInstanceRequest +from google.cloud.compute_v1.types.compute import UpdateUrlMapRequest +from google.cloud.compute_v1.types.compute import UrlMap +from google.cloud.compute_v1.types.compute import UrlMapList +from google.cloud.compute_v1.types.compute import UrlMapReference +from google.cloud.compute_v1.types.compute import UrlMapsAggregatedList +from google.cloud.compute_v1.types.compute import UrlMapsScopedList +from google.cloud.compute_v1.types.compute import UrlMapsValidateRequest +from google.cloud.compute_v1.types.compute import UrlMapsValidateResponse +from google.cloud.compute_v1.types.compute import UrlMapTest +from google.cloud.compute_v1.types.compute import UrlMapTestHeader +from google.cloud.compute_v1.types.compute import UrlMapValidationResult +from google.cloud.compute_v1.types.compute import UrlRewrite +from google.cloud.compute_v1.types.compute import UsableSubnetwork +from google.cloud.compute_v1.types.compute import UsableSubnetworksAggregatedList +from google.cloud.compute_v1.types.compute import UsableSubnetworkSecondaryRange +from google.cloud.compute_v1.types.compute import UsageExportLocation +from google.cloud.compute_v1.types.compute import ValidateRegionUrlMapRequest +from google.cloud.compute_v1.types.compute import 
ValidateUrlMapRequest +from google.cloud.compute_v1.types.compute import VmEndpointNatMappings +from google.cloud.compute_v1.types.compute import VmEndpointNatMappingsInterfaceNatMappings +from google.cloud.compute_v1.types.compute import VmEndpointNatMappingsList +from google.cloud.compute_v1.types.compute import VpnGateway +from google.cloud.compute_v1.types.compute import VpnGatewayAggregatedList +from google.cloud.compute_v1.types.compute import VpnGatewayList +from google.cloud.compute_v1.types.compute import VpnGatewaysGetStatusResponse +from google.cloud.compute_v1.types.compute import VpnGatewaysScopedList +from google.cloud.compute_v1.types.compute import VpnGatewayStatus +from google.cloud.compute_v1.types.compute import VpnGatewayStatusHighAvailabilityRequirementState +from google.cloud.compute_v1.types.compute import VpnGatewayStatusTunnel +from google.cloud.compute_v1.types.compute import VpnGatewayStatusVpnConnection +from google.cloud.compute_v1.types.compute import VpnGatewayVpnGatewayInterface +from google.cloud.compute_v1.types.compute import VpnTunnel +from google.cloud.compute_v1.types.compute import VpnTunnelAggregatedList +from google.cloud.compute_v1.types.compute import VpnTunnelList +from google.cloud.compute_v1.types.compute import VpnTunnelsScopedList +from google.cloud.compute_v1.types.compute import WafExpressionSet +from google.cloud.compute_v1.types.compute import WafExpressionSetExpression +from google.cloud.compute_v1.types.compute import WaitGlobalOperationRequest +from google.cloud.compute_v1.types.compute import WaitRegionOperationRequest +from google.cloud.compute_v1.types.compute import WaitZoneOperationRequest +from google.cloud.compute_v1.types.compute import Warning +from google.cloud.compute_v1.types.compute import Warnings +from google.cloud.compute_v1.types.compute import WeightedBackendService +from google.cloud.compute_v1.types.compute import XpnHostList +from google.cloud.compute_v1.types.compute import XpnResourceId 
+from google.cloud.compute_v1.types.compute import Zone +from google.cloud.compute_v1.types.compute import ZoneList +from google.cloud.compute_v1.types.compute import ZoneSetLabelsRequest +from google.cloud.compute_v1.types.compute import ZoneSetPolicyRequest + +__all__ = ('AcceleratorTypesClient', + 'AddressesClient', + 'AutoscalersClient', + 'BackendBucketsClient', + 'BackendServicesClient', + 'DisksClient', + 'DiskTypesClient', + 'ExternalVpnGatewaysClient', + 'FirewallPoliciesClient', + 'FirewallsClient', + 'ForwardingRulesClient', + 'GlobalAddressesClient', + 'GlobalForwardingRulesClient', + 'GlobalNetworkEndpointGroupsClient', + 'GlobalOperationsClient', + 'GlobalOrganizationOperationsClient', + 'GlobalPublicDelegatedPrefixesClient', + 'HealthChecksClient', + 'ImageFamilyViewsClient', + 'ImagesClient', + 'InstanceGroupManagersClient', + 'InstanceGroupsClient', + 'InstancesClient', + 'InstanceTemplatesClient', + 'InterconnectAttachmentsClient', + 'InterconnectLocationsClient', + 'InterconnectsClient', + 'LicenseCodesClient', + 'LicensesClient', + 'MachineTypesClient', + 'NetworkEndpointGroupsClient', + 'NetworksClient', + 'NodeGroupsClient', + 'NodeTemplatesClient', + 'NodeTypesClient', + 'PacketMirroringsClient', + 'ProjectsClient', + 'PublicAdvertisedPrefixesClient', + 'PublicDelegatedPrefixesClient', + 'RegionAutoscalersClient', + 'RegionBackendServicesClient', + 'RegionCommitmentsClient', + 'RegionDisksClient', + 'RegionDiskTypesClient', + 'RegionHealthChecksClient', + 'RegionHealthCheckServicesClient', + 'RegionInstanceGroupManagersClient', + 'RegionInstanceGroupsClient', + 'RegionInstancesClient', + 'RegionNetworkEndpointGroupsClient', + 'RegionNotificationEndpointsClient', + 'RegionOperationsClient', + 'RegionsClient', + 'RegionSslCertificatesClient', + 'RegionTargetHttpProxiesClient', + 'RegionTargetHttpsProxiesClient', + 'RegionUrlMapsClient', + 'ReservationsClient', + 'ResourcePoliciesClient', + 'RoutersClient', + 'RoutesClient', + 
'SecurityPoliciesClient', + 'ServiceAttachmentsClient', + 'SnapshotsClient', + 'SslCertificatesClient', + 'SslPoliciesClient', + 'SubnetworksClient', + 'TargetGrpcProxiesClient', + 'TargetHttpProxiesClient', + 'TargetHttpsProxiesClient', + 'TargetInstancesClient', + 'TargetPoolsClient', + 'TargetSslProxiesClient', + 'TargetTcpProxiesClient', + 'TargetVpnGatewaysClient', + 'UrlMapsClient', + 'VpnGatewaysClient', + 'VpnTunnelsClient', + 'ZoneOperationsClient', + 'ZonesClient', + 'AbandonInstancesInstanceGroupManagerRequest', + 'AbandonInstancesRegionInstanceGroupManagerRequest', + 'AcceleratorConfig', + 'Accelerators', + 'AcceleratorType', + 'AcceleratorTypeAggregatedList', + 'AcceleratorTypeList', + 'AcceleratorTypesScopedList', + 'AccessConfig', + 'AddAccessConfigInstanceRequest', + 'AddAssociationFirewallPolicyRequest', + 'AddHealthCheckTargetPoolRequest', + 'AddInstancesInstanceGroupRequest', + 'AddInstanceTargetPoolRequest', + 'AddNodesNodeGroupRequest', + 'AddPeeringNetworkRequest', + 'AddResourcePoliciesDiskRequest', + 'AddResourcePoliciesInstanceRequest', + 'AddResourcePoliciesRegionDiskRequest', + 'Address', + 'AddressAggregatedList', + 'AddressesScopedList', + 'AddressList', + 'AddRuleFirewallPolicyRequest', + 'AddRuleSecurityPolicyRequest', + 'AddSignedUrlKeyBackendBucketRequest', + 'AddSignedUrlKeyBackendServiceRequest', + 'AdvancedMachineFeatures', + 'AggregatedListAcceleratorTypesRequest', + 'AggregatedListAddressesRequest', + 'AggregatedListAutoscalersRequest', + 'AggregatedListBackendServicesRequest', + 'AggregatedListDisksRequest', + 'AggregatedListDiskTypesRequest', + 'AggregatedListForwardingRulesRequest', + 'AggregatedListGlobalOperationsRequest', + 'AggregatedListHealthChecksRequest', + 'AggregatedListInstanceGroupManagersRequest', + 'AggregatedListInstanceGroupsRequest', + 'AggregatedListInstancesRequest', + 'AggregatedListInterconnectAttachmentsRequest', + 'AggregatedListMachineTypesRequest', + 'AggregatedListNetworkEndpointGroupsRequest', + 
'AggregatedListNodeGroupsRequest', + 'AggregatedListNodeTemplatesRequest', + 'AggregatedListNodeTypesRequest', + 'AggregatedListPacketMirroringsRequest', + 'AggregatedListPublicDelegatedPrefixesRequest', + 'AggregatedListRegionCommitmentsRequest', + 'AggregatedListReservationsRequest', + 'AggregatedListResourcePoliciesRequest', + 'AggregatedListRoutersRequest', + 'AggregatedListServiceAttachmentsRequest', + 'AggregatedListSslCertificatesRequest', + 'AggregatedListSubnetworksRequest', + 'AggregatedListTargetHttpProxiesRequest', + 'AggregatedListTargetHttpsProxiesRequest', + 'AggregatedListTargetInstancesRequest', + 'AggregatedListTargetPoolsRequest', + 'AggregatedListTargetVpnGatewaysRequest', + 'AggregatedListUrlMapsRequest', + 'AggregatedListVpnGatewaysRequest', + 'AggregatedListVpnTunnelsRequest', + 'AliasIpRange', + 'AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk', + 'AllocationSpecificSKUAllocationReservedInstanceProperties', + 'AllocationSpecificSKUReservation', + 'Allowed', + 'ApplyUpdatesToInstancesInstanceGroupManagerRequest', + 'ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest', + 'AttachDiskInstanceRequest', + 'AttachedDisk', + 'AttachedDiskInitializeParams', + 'AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest', + 'AttachNetworkEndpointsNetworkEndpointGroupRequest', + 'AuditConfig', + 'AuditLogConfig', + 'AuthorizationLoggingOptions', + 'Autoscaler', + 'AutoscalerAggregatedList', + 'AutoscalerList', + 'AutoscalersScopedList', + 'AutoscalerStatusDetails', + 'AutoscalingPolicy', + 'AutoscalingPolicyCpuUtilization', + 'AutoscalingPolicyCustomMetricUtilization', + 'AutoscalingPolicyLoadBalancingUtilization', + 'AutoscalingPolicyScaleInControl', + 'AutoscalingPolicyScalingSchedule', + 'Backend', + 'BackendBucket', + 'BackendBucketCdnPolicy', + 'BackendBucketCdnPolicyBypassCacheOnRequestHeader', + 'BackendBucketCdnPolicyNegativeCachingPolicy', + 'BackendBucketList', + 'BackendService', + 'BackendServiceAggregatedList', + 
'BackendServiceCdnPolicy', + 'BackendServiceCdnPolicyBypassCacheOnRequestHeader', + 'BackendServiceCdnPolicyNegativeCachingPolicy', + 'BackendServiceFailoverPolicy', + 'BackendServiceGroupHealth', + 'BackendServiceIAP', + 'BackendServiceList', + 'BackendServiceLogConfig', + 'BackendServiceReference', + 'BackendServicesScopedList', + 'Binding', + 'BulkInsertInstanceRequest', + 'BulkInsertInstanceResource', + 'BulkInsertInstanceResourcePerInstanceProperties', + 'BulkInsertRegionInstanceRequest', + 'CacheInvalidationRule', + 'CacheKeyPolicy', + 'CircuitBreakers', + 'CloneRulesFirewallPolicyRequest', + 'Commitment', + 'CommitmentAggregatedList', + 'CommitmentList', + 'CommitmentsScopedList', + 'Condition', + 'ConfidentialInstanceConfig', + 'ConnectionDraining', + 'ConsistentHashLoadBalancerSettings', + 'ConsistentHashLoadBalancerSettingsHttpCookie', + 'CorsPolicy', + 'CreateInstancesInstanceGroupManagerRequest', + 'CreateInstancesRegionInstanceGroupManagerRequest', + 'CreateSnapshotDiskRequest', + 'CreateSnapshotRegionDiskRequest', + 'CustomerEncryptionKey', + 'CustomerEncryptionKeyProtectedDisk', + 'Data', + 'DeleteAccessConfigInstanceRequest', + 'DeleteAddressRequest', + 'DeleteAutoscalerRequest', + 'DeleteBackendBucketRequest', + 'DeleteBackendServiceRequest', + 'DeleteDiskRequest', + 'DeleteExternalVpnGatewayRequest', + 'DeleteFirewallPolicyRequest', + 'DeleteFirewallRequest', + 'DeleteForwardingRuleRequest', + 'DeleteGlobalAddressRequest', + 'DeleteGlobalForwardingRuleRequest', + 'DeleteGlobalNetworkEndpointGroupRequest', + 'DeleteGlobalOperationRequest', + 'DeleteGlobalOperationResponse', + 'DeleteGlobalOrganizationOperationRequest', + 'DeleteGlobalOrganizationOperationResponse', + 'DeleteGlobalPublicDelegatedPrefixeRequest', + 'DeleteHealthCheckRequest', + 'DeleteImageRequest', + 'DeleteInstanceGroupManagerRequest', + 'DeleteInstanceGroupRequest', + 'DeleteInstanceRequest', + 'DeleteInstancesInstanceGroupManagerRequest', + 
'DeleteInstancesRegionInstanceGroupManagerRequest', + 'DeleteInstanceTemplateRequest', + 'DeleteInterconnectAttachmentRequest', + 'DeleteInterconnectRequest', + 'DeleteLicenseRequest', + 'DeleteNetworkEndpointGroupRequest', + 'DeleteNetworkRequest', + 'DeleteNodeGroupRequest', + 'DeleteNodesNodeGroupRequest', + 'DeleteNodeTemplateRequest', + 'DeletePacketMirroringRequest', + 'DeletePerInstanceConfigsInstanceGroupManagerRequest', + 'DeletePerInstanceConfigsRegionInstanceGroupManagerRequest', + 'DeletePublicAdvertisedPrefixeRequest', + 'DeletePublicDelegatedPrefixeRequest', + 'DeleteRegionAutoscalerRequest', + 'DeleteRegionBackendServiceRequest', + 'DeleteRegionDiskRequest', + 'DeleteRegionHealthCheckRequest', + 'DeleteRegionHealthCheckServiceRequest', + 'DeleteRegionInstanceGroupManagerRequest', + 'DeleteRegionNetworkEndpointGroupRequest', + 'DeleteRegionNotificationEndpointRequest', + 'DeleteRegionOperationRequest', + 'DeleteRegionOperationResponse', + 'DeleteRegionSslCertificateRequest', + 'DeleteRegionTargetHttpProxyRequest', + 'DeleteRegionTargetHttpsProxyRequest', + 'DeleteRegionUrlMapRequest', + 'DeleteReservationRequest', + 'DeleteResourcePolicyRequest', + 'DeleteRouteRequest', + 'DeleteRouterRequest', + 'DeleteSecurityPolicyRequest', + 'DeleteServiceAttachmentRequest', + 'DeleteSignedUrlKeyBackendBucketRequest', + 'DeleteSignedUrlKeyBackendServiceRequest', + 'DeleteSnapshotRequest', + 'DeleteSslCertificateRequest', + 'DeleteSslPolicyRequest', + 'DeleteSubnetworkRequest', + 'DeleteTargetGrpcProxyRequest', + 'DeleteTargetHttpProxyRequest', + 'DeleteTargetHttpsProxyRequest', + 'DeleteTargetInstanceRequest', + 'DeleteTargetPoolRequest', + 'DeleteTargetSslProxyRequest', + 'DeleteTargetTcpProxyRequest', + 'DeleteTargetVpnGatewayRequest', + 'DeleteUrlMapRequest', + 'DeleteVpnGatewayRequest', + 'DeleteVpnTunnelRequest', + 'DeleteZoneOperationRequest', + 'DeleteZoneOperationResponse', + 'Denied', + 'DeprecateImageRequest', + 'DeprecationStatus', + 
'DetachDiskInstanceRequest', + 'DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest', + 'DetachNetworkEndpointsNetworkEndpointGroupRequest', + 'DisableXpnHostProjectRequest', + 'DisableXpnResourceProjectRequest', + 'Disk', + 'DiskAggregatedList', + 'DiskInstantiationConfig', + 'DiskList', + 'DiskMoveRequest', + 'DisksAddResourcePoliciesRequest', + 'DisksRemoveResourcePoliciesRequest', + 'DisksResizeRequest', + 'DisksScopedList', + 'DiskType', + 'DiskTypeAggregatedList', + 'DiskTypeList', + 'DiskTypesScopedList', + 'DisplayDevice', + 'DistributionPolicy', + 'DistributionPolicyZoneConfiguration', + 'Duration', + 'EnableXpnHostProjectRequest', + 'EnableXpnResourceProjectRequest', + 'Error', + 'Errors', + 'ExchangedPeeringRoute', + 'ExchangedPeeringRoutesList', + 'ExpandIpCidrRangeSubnetworkRequest', + 'Expr', + 'ExternalVpnGateway', + 'ExternalVpnGatewayInterface', + 'ExternalVpnGatewayList', + 'FileContentBuffer', + 'Firewall', + 'FirewallList', + 'FirewallLogConfig', + 'FirewallPoliciesListAssociationsResponse', + 'FirewallPolicy', + 'FirewallPolicyAssociation', + 'FirewallPolicyList', + 'FirewallPolicyRule', + 'FirewallPolicyRuleMatcher', + 'FirewallPolicyRuleMatcherLayer4Config', + 'FixedOrPercent', + 'ForwardingRule', + 'ForwardingRuleAggregatedList', + 'ForwardingRuleList', + 'ForwardingRuleReference', + 'ForwardingRuleServiceDirectoryRegistration', + 'ForwardingRulesScopedList', + 'GetAcceleratorTypeRequest', + 'GetAddressRequest', + 'GetAssociationFirewallPolicyRequest', + 'GetAutoscalerRequest', + 'GetBackendBucketRequest', + 'GetBackendServiceRequest', + 'GetDiagnosticsInterconnectRequest', + 'GetDiskRequest', + 'GetDiskTypeRequest', + 'GetEffectiveFirewallsInstanceRequest', + 'GetEffectiveFirewallsNetworkRequest', + 'GetExternalVpnGatewayRequest', + 'GetFirewallPolicyRequest', + 'GetFirewallRequest', + 'GetForwardingRuleRequest', + 'GetFromFamilyImageRequest', + 'GetGlobalAddressRequest', + 'GetGlobalForwardingRuleRequest', + 
'GetGlobalNetworkEndpointGroupRequest', + 'GetGlobalOperationRequest', + 'GetGlobalOrganizationOperationRequest', + 'GetGlobalPublicDelegatedPrefixeRequest', + 'GetGuestAttributesInstanceRequest', + 'GetHealthBackendServiceRequest', + 'GetHealthCheckRequest', + 'GetHealthRegionBackendServiceRequest', + 'GetHealthTargetPoolRequest', + 'GetIamPolicyDiskRequest', + 'GetIamPolicyFirewallPolicyRequest', + 'GetIamPolicyImageRequest', + 'GetIamPolicyInstanceRequest', + 'GetIamPolicyInstanceTemplateRequest', + 'GetIamPolicyLicenseRequest', + 'GetIamPolicyNodeGroupRequest', + 'GetIamPolicyNodeTemplateRequest', + 'GetIamPolicyRegionDiskRequest', + 'GetIamPolicyReservationRequest', + 'GetIamPolicyResourcePolicyRequest', + 'GetIamPolicyServiceAttachmentRequest', + 'GetIamPolicySnapshotRequest', + 'GetIamPolicySubnetworkRequest', + 'GetImageFamilyViewRequest', + 'GetImageRequest', + 'GetInstanceGroupManagerRequest', + 'GetInstanceGroupRequest', + 'GetInstanceRequest', + 'GetInstanceTemplateRequest', + 'GetInterconnectAttachmentRequest', + 'GetInterconnectLocationRequest', + 'GetInterconnectRequest', + 'GetLicenseCodeRequest', + 'GetLicenseRequest', + 'GetMachineTypeRequest', + 'GetNatMappingInfoRoutersRequest', + 'GetNetworkEndpointGroupRequest', + 'GetNetworkRequest', + 'GetNodeGroupRequest', + 'GetNodeTemplateRequest', + 'GetNodeTypeRequest', + 'GetPacketMirroringRequest', + 'GetProjectRequest', + 'GetPublicAdvertisedPrefixeRequest', + 'GetPublicDelegatedPrefixeRequest', + 'GetRegionAutoscalerRequest', + 'GetRegionBackendServiceRequest', + 'GetRegionCommitmentRequest', + 'GetRegionDiskRequest', + 'GetRegionDiskTypeRequest', + 'GetRegionHealthCheckRequest', + 'GetRegionHealthCheckServiceRequest', + 'GetRegionInstanceGroupManagerRequest', + 'GetRegionInstanceGroupRequest', + 'GetRegionNetworkEndpointGroupRequest', + 'GetRegionNotificationEndpointRequest', + 'GetRegionOperationRequest', + 'GetRegionRequest', + 'GetRegionSslCertificateRequest', + 
'GetRegionTargetHttpProxyRequest', + 'GetRegionTargetHttpsProxyRequest', + 'GetRegionUrlMapRequest', + 'GetReservationRequest', + 'GetResourcePolicyRequest', + 'GetRouteRequest', + 'GetRouterRequest', + 'GetRouterStatusRouterRequest', + 'GetRuleFirewallPolicyRequest', + 'GetRuleSecurityPolicyRequest', + 'GetScreenshotInstanceRequest', + 'GetSecurityPolicyRequest', + 'GetSerialPortOutputInstanceRequest', + 'GetServiceAttachmentRequest', + 'GetShieldedInstanceIdentityInstanceRequest', + 'GetSnapshotRequest', + 'GetSslCertificateRequest', + 'GetSslPolicyRequest', + 'GetStatusVpnGatewayRequest', + 'GetSubnetworkRequest', + 'GetTargetGrpcProxyRequest', + 'GetTargetHttpProxyRequest', + 'GetTargetHttpsProxyRequest', + 'GetTargetInstanceRequest', + 'GetTargetPoolRequest', + 'GetTargetSslProxyRequest', + 'GetTargetTcpProxyRequest', + 'GetTargetVpnGatewayRequest', + 'GetUrlMapRequest', + 'GetVpnGatewayRequest', + 'GetVpnTunnelRequest', + 'GetXpnHostProjectRequest', + 'GetXpnResourcesProjectsRequest', + 'GetZoneOperationRequest', + 'GetZoneRequest', + 'GlobalNetworkEndpointGroupsAttachEndpointsRequest', + 'GlobalNetworkEndpointGroupsDetachEndpointsRequest', + 'GlobalOrganizationSetPolicyRequest', + 'GlobalSetLabelsRequest', + 'GlobalSetPolicyRequest', + 'GRPCHealthCheck', + 'GuestAttributes', + 'GuestAttributesEntry', + 'GuestAttributesValue', + 'GuestOsFeature', + 'HealthCheck', + 'HealthCheckList', + 'HealthCheckLogConfig', + 'HealthCheckReference', + 'HealthChecksAggregatedList', + 'HealthCheckService', + 'HealthCheckServiceReference', + 'HealthCheckServicesList', + 'HealthChecksScopedList', + 'HealthStatus', + 'HealthStatusForNetworkEndpoint', + 'HostRule', + 'HTTP2HealthCheck', + 'HttpFaultAbort', + 'HttpFaultDelay', + 'HttpFaultInjection', + 'HttpHeaderAction', + 'HttpHeaderMatch', + 'HttpHeaderOption', + 'HTTPHealthCheck', + 'HttpQueryParameterMatch', + 'HttpRedirectAction', + 'HttpRetryPolicy', + 'HttpRouteAction', + 'HttpRouteRule', + 'HttpRouteRuleMatch', + 
'HTTPSHealthCheck', + 'Image', + 'ImageFamilyView', + 'ImageList', + 'InitialStateConfig', + 'InsertAddressRequest', + 'InsertAutoscalerRequest', + 'InsertBackendBucketRequest', + 'InsertBackendServiceRequest', + 'InsertDiskRequest', + 'InsertExternalVpnGatewayRequest', + 'InsertFirewallPolicyRequest', + 'InsertFirewallRequest', + 'InsertForwardingRuleRequest', + 'InsertGlobalAddressRequest', + 'InsertGlobalForwardingRuleRequest', + 'InsertGlobalNetworkEndpointGroupRequest', + 'InsertGlobalPublicDelegatedPrefixeRequest', + 'InsertHealthCheckRequest', + 'InsertImageRequest', + 'InsertInstanceGroupManagerRequest', + 'InsertInstanceGroupRequest', + 'InsertInstanceRequest', + 'InsertInstanceTemplateRequest', + 'InsertInterconnectAttachmentRequest', + 'InsertInterconnectRequest', + 'InsertLicenseRequest', + 'InsertNetworkEndpointGroupRequest', + 'InsertNetworkRequest', + 'InsertNodeGroupRequest', + 'InsertNodeTemplateRequest', + 'InsertPacketMirroringRequest', + 'InsertPublicAdvertisedPrefixeRequest', + 'InsertPublicDelegatedPrefixeRequest', + 'InsertRegionAutoscalerRequest', + 'InsertRegionBackendServiceRequest', + 'InsertRegionCommitmentRequest', + 'InsertRegionDiskRequest', + 'InsertRegionHealthCheckRequest', + 'InsertRegionHealthCheckServiceRequest', + 'InsertRegionInstanceGroupManagerRequest', + 'InsertRegionNetworkEndpointGroupRequest', + 'InsertRegionNotificationEndpointRequest', + 'InsertRegionSslCertificateRequest', + 'InsertRegionTargetHttpProxyRequest', + 'InsertRegionTargetHttpsProxyRequest', + 'InsertRegionUrlMapRequest', + 'InsertReservationRequest', + 'InsertResourcePolicyRequest', + 'InsertRouteRequest', + 'InsertRouterRequest', + 'InsertSecurityPolicyRequest', + 'InsertServiceAttachmentRequest', + 'InsertSslCertificateRequest', + 'InsertSslPolicyRequest', + 'InsertSubnetworkRequest', + 'InsertTargetGrpcProxyRequest', + 'InsertTargetHttpProxyRequest', + 'InsertTargetHttpsProxyRequest', + 'InsertTargetInstanceRequest', + 'InsertTargetPoolRequest', + 
'InsertTargetSslProxyRequest', + 'InsertTargetTcpProxyRequest', + 'InsertTargetVpnGatewayRequest', + 'InsertUrlMapRequest', + 'InsertVpnGatewayRequest', + 'InsertVpnTunnelRequest', + 'Instance', + 'InstanceAggregatedList', + 'InstanceGroup', + 'InstanceGroupAggregatedList', + 'InstanceGroupList', + 'InstanceGroupManager', + 'InstanceGroupManagerActionsSummary', + 'InstanceGroupManagerAggregatedList', + 'InstanceGroupManagerAutoHealingPolicy', + 'InstanceGroupManagerList', + 'InstanceGroupManagersAbandonInstancesRequest', + 'InstanceGroupManagersApplyUpdatesRequest', + 'InstanceGroupManagersCreateInstancesRequest', + 'InstanceGroupManagersDeleteInstancesRequest', + 'InstanceGroupManagersDeletePerInstanceConfigsReq', + 'InstanceGroupManagersListErrorsResponse', + 'InstanceGroupManagersListManagedInstancesResponse', + 'InstanceGroupManagersListPerInstanceConfigsResp', + 'InstanceGroupManagersPatchPerInstanceConfigsReq', + 'InstanceGroupManagersRecreateInstancesRequest', + 'InstanceGroupManagersScopedList', + 'InstanceGroupManagersSetInstanceTemplateRequest', + 'InstanceGroupManagersSetTargetPoolsRequest', + 'InstanceGroupManagerStatus', + 'InstanceGroupManagerStatusStateful', + 'InstanceGroupManagerStatusStatefulPerInstanceConfigs', + 'InstanceGroupManagerStatusVersionTarget', + 'InstanceGroupManagersUpdatePerInstanceConfigsReq', + 'InstanceGroupManagerUpdatePolicy', + 'InstanceGroupManagerVersion', + 'InstanceGroupsAddInstancesRequest', + 'InstanceGroupsListInstances', + 'InstanceGroupsListInstancesRequest', + 'InstanceGroupsRemoveInstancesRequest', + 'InstanceGroupsScopedList', + 'InstanceGroupsSetNamedPortsRequest', + 'InstanceList', + 'InstanceListReferrers', + 'InstanceManagedByIgmError', + 'InstanceManagedByIgmErrorInstanceActionDetails', + 'InstanceManagedByIgmErrorManagedInstanceError', + 'InstanceMoveRequest', + 'InstanceProperties', + 'InstanceReference', + 'InstancesAddResourcePoliciesRequest', + 'InstancesGetEffectiveFirewallsResponse', + 
'InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + 'InstancesRemoveResourcePoliciesRequest', + 'InstancesScopedList', + 'InstancesSetLabelsRequest', + 'InstancesSetMachineResourcesRequest', + 'InstancesSetMachineTypeRequest', + 'InstancesSetMinCpuPlatformRequest', + 'InstancesSetServiceAccountRequest', + 'InstancesStartWithEncryptionKeyRequest', + 'InstanceTemplate', + 'InstanceTemplateList', + 'InstanceWithNamedPorts', + 'Int64RangeMatch', + 'Interconnect', + 'InterconnectAttachment', + 'InterconnectAttachmentAggregatedList', + 'InterconnectAttachmentList', + 'InterconnectAttachmentPartnerMetadata', + 'InterconnectAttachmentPrivateInfo', + 'InterconnectAttachmentsScopedList', + 'InterconnectCircuitInfo', + 'InterconnectDiagnostics', + 'InterconnectDiagnosticsARPEntry', + 'InterconnectDiagnosticsLinkLACPStatus', + 'InterconnectDiagnosticsLinkOpticalPower', + 'InterconnectDiagnosticsLinkStatus', + 'InterconnectList', + 'InterconnectLocation', + 'InterconnectLocationList', + 'InterconnectLocationRegionInfo', + 'InterconnectOutageNotification', + 'InterconnectsGetDiagnosticsResponse', + 'InvalidateCacheUrlMapRequest', + 'Items', + 'License', + 'LicenseCode', + 'LicenseCodeLicenseAlias', + 'LicenseResourceCommitment', + 'LicenseResourceRequirements', + 'LicensesListResponse', + 'ListAcceleratorTypesRequest', + 'ListAddressesRequest', + 'ListAssociationsFirewallPolicyRequest', + 'ListAutoscalersRequest', + 'ListAvailableFeaturesSslPoliciesRequest', + 'ListBackendBucketsRequest', + 'ListBackendServicesRequest', + 'ListDisksRequest', + 'ListDiskTypesRequest', + 'ListErrorsInstanceGroupManagersRequest', + 'ListErrorsRegionInstanceGroupManagersRequest', + 'ListExternalVpnGatewaysRequest', + 'ListFirewallPoliciesRequest', + 'ListFirewallsRequest', + 'ListForwardingRulesRequest', + 'ListGlobalAddressesRequest', + 'ListGlobalForwardingRulesRequest', + 'ListGlobalNetworkEndpointGroupsRequest', + 'ListGlobalOperationsRequest', + 
'ListGlobalOrganizationOperationsRequest', + 'ListGlobalPublicDelegatedPrefixesRequest', + 'ListHealthChecksRequest', + 'ListImagesRequest', + 'ListInstanceGroupManagersRequest', + 'ListInstanceGroupsRequest', + 'ListInstancesInstanceGroupsRequest', + 'ListInstancesRegionInstanceGroupsRequest', + 'ListInstancesRequest', + 'ListInstanceTemplatesRequest', + 'ListInterconnectAttachmentsRequest', + 'ListInterconnectLocationsRequest', + 'ListInterconnectsRequest', + 'ListLicensesRequest', + 'ListMachineTypesRequest', + 'ListManagedInstancesInstanceGroupManagersRequest', + 'ListManagedInstancesRegionInstanceGroupManagersRequest', + 'ListNetworkEndpointGroupsRequest', + 'ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest', + 'ListNetworkEndpointsNetworkEndpointGroupsRequest', + 'ListNetworksRequest', + 'ListNodeGroupsRequest', + 'ListNodesNodeGroupsRequest', + 'ListNodeTemplatesRequest', + 'ListNodeTypesRequest', + 'ListPacketMirroringsRequest', + 'ListPeeringRoutesNetworksRequest', + 'ListPerInstanceConfigsInstanceGroupManagersRequest', + 'ListPerInstanceConfigsRegionInstanceGroupManagersRequest', + 'ListPreconfiguredExpressionSetsSecurityPoliciesRequest', + 'ListPublicAdvertisedPrefixesRequest', + 'ListPublicDelegatedPrefixesRequest', + 'ListReferrersInstancesRequest', + 'ListRegionAutoscalersRequest', + 'ListRegionBackendServicesRequest', + 'ListRegionCommitmentsRequest', + 'ListRegionDisksRequest', + 'ListRegionDiskTypesRequest', + 'ListRegionHealthCheckServicesRequest', + 'ListRegionHealthChecksRequest', + 'ListRegionInstanceGroupManagersRequest', + 'ListRegionInstanceGroupsRequest', + 'ListRegionNetworkEndpointGroupsRequest', + 'ListRegionNotificationEndpointsRequest', + 'ListRegionOperationsRequest', + 'ListRegionsRequest', + 'ListRegionSslCertificatesRequest', + 'ListRegionTargetHttpProxiesRequest', + 'ListRegionTargetHttpsProxiesRequest', + 'ListRegionUrlMapsRequest', + 'ListReservationsRequest', + 'ListResourcePoliciesRequest', + 'ListRoutersRequest', + 
'ListRoutesRequest', + 'ListSecurityPoliciesRequest', + 'ListServiceAttachmentsRequest', + 'ListSnapshotsRequest', + 'ListSslCertificatesRequest', + 'ListSslPoliciesRequest', + 'ListSubnetworksRequest', + 'ListTargetGrpcProxiesRequest', + 'ListTargetHttpProxiesRequest', + 'ListTargetHttpsProxiesRequest', + 'ListTargetInstancesRequest', + 'ListTargetPoolsRequest', + 'ListTargetSslProxiesRequest', + 'ListTargetTcpProxiesRequest', + 'ListTargetVpnGatewaysRequest', + 'ListUrlMapsRequest', + 'ListUsableSubnetworksRequest', + 'ListVpnGatewaysRequest', + 'ListVpnTunnelsRequest', + 'ListXpnHostsProjectsRequest', + 'ListZoneOperationsRequest', + 'ListZonesRequest', + 'LocalDisk', + 'LocationPolicy', + 'LocationPolicyLocation', + 'LogConfig', + 'LogConfigCloudAuditOptions', + 'LogConfigCounterOptions', + 'LogConfigCounterOptionsCustomField', + 'LogConfigDataAccessOptions', + 'MachineType', + 'MachineTypeAggregatedList', + 'MachineTypeList', + 'MachineTypesScopedList', + 'ManagedInstance', + 'ManagedInstanceInstanceHealth', + 'ManagedInstanceLastAttempt', + 'ManagedInstanceVersion', + 'Metadata', + 'MetadataFilter', + 'MetadataFilterLabelMatch', + 'MoveDiskProjectRequest', + 'MoveFirewallPolicyRequest', + 'MoveInstanceProjectRequest', + 'NamedPort', + 'Network', + 'NetworkEndpoint', + 'NetworkEndpointGroup', + 'NetworkEndpointGroupAggregatedList', + 'NetworkEndpointGroupAppEngine', + 'NetworkEndpointGroupCloudFunction', + 'NetworkEndpointGroupCloudRun', + 'NetworkEndpointGroupList', + 'NetworkEndpointGroupsAttachEndpointsRequest', + 'NetworkEndpointGroupsDetachEndpointsRequest', + 'NetworkEndpointGroupsListEndpointsRequest', + 'NetworkEndpointGroupsListNetworkEndpoints', + 'NetworkEndpointGroupsScopedList', + 'NetworkEndpointWithHealthStatus', + 'NetworkInterface', + 'NetworkList', + 'NetworkPeering', + 'NetworkRoutingConfig', + 'NetworksAddPeeringRequest', + 'NetworksGetEffectiveFirewallsResponse', + 'NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + 
'NetworksRemovePeeringRequest', + 'NetworksUpdatePeeringRequest', + 'NodeGroup', + 'NodeGroupAggregatedList', + 'NodeGroupAutoscalingPolicy', + 'NodeGroupList', + 'NodeGroupMaintenanceWindow', + 'NodeGroupNode', + 'NodeGroupsAddNodesRequest', + 'NodeGroupsDeleteNodesRequest', + 'NodeGroupsListNodes', + 'NodeGroupsScopedList', + 'NodeGroupsSetNodeTemplateRequest', + 'NodeTemplate', + 'NodeTemplateAggregatedList', + 'NodeTemplateList', + 'NodeTemplateNodeTypeFlexibility', + 'NodeTemplatesScopedList', + 'NodeType', + 'NodeTypeAggregatedList', + 'NodeTypeList', + 'NodeTypesScopedList', + 'NotificationEndpoint', + 'NotificationEndpointGrpcSettings', + 'NotificationEndpointList', + 'Operation', + 'OperationAggregatedList', + 'OperationList', + 'OperationsScopedList', + 'OutlierDetection', + 'PacketMirroring', + 'PacketMirroringAggregatedList', + 'PacketMirroringFilter', + 'PacketMirroringForwardingRuleInfo', + 'PacketMirroringList', + 'PacketMirroringMirroredResourceInfo', + 'PacketMirroringMirroredResourceInfoInstanceInfo', + 'PacketMirroringMirroredResourceInfoSubnetInfo', + 'PacketMirroringNetworkInfo', + 'PacketMirroringsScopedList', + 'PatchAutoscalerRequest', + 'PatchBackendBucketRequest', + 'PatchBackendServiceRequest', + 'PatchFirewallPolicyRequest', + 'PatchFirewallRequest', + 'PatchForwardingRuleRequest', + 'PatchGlobalForwardingRuleRequest', + 'PatchGlobalPublicDelegatedPrefixeRequest', + 'PatchHealthCheckRequest', + 'PatchImageRequest', + 'PatchInstanceGroupManagerRequest', + 'PatchInterconnectAttachmentRequest', + 'PatchInterconnectRequest', + 'PatchNetworkRequest', + 'PatchNodeGroupRequest', + 'PatchPacketMirroringRequest', + 'PatchPerInstanceConfigsInstanceGroupManagerRequest', + 'PatchPerInstanceConfigsRegionInstanceGroupManagerRequest', + 'PatchPublicAdvertisedPrefixeRequest', + 'PatchPublicDelegatedPrefixeRequest', + 'PatchRegionAutoscalerRequest', + 'PatchRegionBackendServiceRequest', + 'PatchRegionHealthCheckRequest', + 
'PatchRegionHealthCheckServiceRequest', + 'PatchRegionInstanceGroupManagerRequest', + 'PatchRegionUrlMapRequest', + 'PatchRouterRequest', + 'PatchRuleFirewallPolicyRequest', + 'PatchRuleSecurityPolicyRequest', + 'PatchSecurityPolicyRequest', + 'PatchServiceAttachmentRequest', + 'PatchSslPolicyRequest', + 'PatchSubnetworkRequest', + 'PatchTargetGrpcProxyRequest', + 'PatchTargetHttpProxyRequest', + 'PatchTargetHttpsProxyRequest', + 'PatchUrlMapRequest', + 'PathMatcher', + 'PathRule', + 'PerInstanceConfig', + 'Policy', + 'PreconfiguredWafSet', + 'PreservedState', + 'PreservedStatePreservedDisk', + 'PreviewRouterRequest', + 'Project', + 'ProjectsDisableXpnResourceRequest', + 'ProjectsEnableXpnResourceRequest', + 'ProjectsGetXpnResources', + 'ProjectsListXpnHostsRequest', + 'ProjectsSetDefaultNetworkTierRequest', + 'PublicAdvertisedPrefix', + 'PublicAdvertisedPrefixList', + 'PublicAdvertisedPrefixPublicDelegatedPrefix', + 'PublicDelegatedPrefix', + 'PublicDelegatedPrefixAggregatedList', + 'PublicDelegatedPrefixesScopedList', + 'PublicDelegatedPrefixList', + 'PublicDelegatedPrefixPublicDelegatedSubPrefix', + 'Quota', + 'RawDisk', + 'RecreateInstancesInstanceGroupManagerRequest', + 'RecreateInstancesRegionInstanceGroupManagerRequest', + 'Reference', + 'Region', + 'RegionAutoscalerList', + 'RegionDisksAddResourcePoliciesRequest', + 'RegionDisksRemoveResourcePoliciesRequest', + 'RegionDisksResizeRequest', + 'RegionDiskTypeList', + 'RegionInstanceGroupList', + 'RegionInstanceGroupManagerDeleteInstanceConfigReq', + 'RegionInstanceGroupManagerList', + 'RegionInstanceGroupManagerPatchInstanceConfigReq', + 'RegionInstanceGroupManagersAbandonInstancesRequest', + 'RegionInstanceGroupManagersApplyUpdatesRequest', + 'RegionInstanceGroupManagersCreateInstancesRequest', + 'RegionInstanceGroupManagersDeleteInstancesRequest', + 'RegionInstanceGroupManagersListErrorsResponse', + 'RegionInstanceGroupManagersListInstanceConfigsResp', + 'RegionInstanceGroupManagersListInstancesResponse', + 
'RegionInstanceGroupManagersRecreateRequest', + 'RegionInstanceGroupManagersSetTargetPoolsRequest', + 'RegionInstanceGroupManagersSetTemplateRequest', + 'RegionInstanceGroupManagerUpdateInstanceConfigReq', + 'RegionInstanceGroupsListInstances', + 'RegionInstanceGroupsListInstancesRequest', + 'RegionInstanceGroupsSetNamedPortsRequest', + 'RegionList', + 'RegionSetLabelsRequest', + 'RegionSetPolicyRequest', + 'RegionTargetHttpsProxiesSetSslCertificatesRequest', + 'RegionUrlMapsValidateRequest', + 'RemoveAssociationFirewallPolicyRequest', + 'RemoveHealthCheckTargetPoolRequest', + 'RemoveInstancesInstanceGroupRequest', + 'RemoveInstanceTargetPoolRequest', + 'RemovePeeringNetworkRequest', + 'RemoveResourcePoliciesDiskRequest', + 'RemoveResourcePoliciesInstanceRequest', + 'RemoveResourcePoliciesRegionDiskRequest', + 'RemoveRuleFirewallPolicyRequest', + 'RemoveRuleSecurityPolicyRequest', + 'RequestMirrorPolicy', + 'Reservation', + 'ReservationAffinity', + 'ReservationAggregatedList', + 'ReservationList', + 'ReservationsResizeRequest', + 'ReservationsScopedList', + 'ResetInstanceRequest', + 'ResizeDiskRequest', + 'ResizeInstanceGroupManagerRequest', + 'ResizeRegionDiskRequest', + 'ResizeRegionInstanceGroupManagerRequest', + 'ResizeReservationRequest', + 'ResourceCommitment', + 'ResourceGroupReference', + 'ResourcePoliciesScopedList', + 'ResourcePolicy', + 'ResourcePolicyAggregatedList', + 'ResourcePolicyDailyCycle', + 'ResourcePolicyGroupPlacementPolicy', + 'ResourcePolicyHourlyCycle', + 'ResourcePolicyInstanceSchedulePolicy', + 'ResourcePolicyInstanceSchedulePolicySchedule', + 'ResourcePolicyList', + 'ResourcePolicyResourceStatus', + 'ResourcePolicyResourceStatusInstanceSchedulePolicyStatus', + 'ResourcePolicySnapshotSchedulePolicy', + 'ResourcePolicySnapshotSchedulePolicyRetentionPolicy', + 'ResourcePolicySnapshotSchedulePolicySchedule', + 'ResourcePolicySnapshotSchedulePolicySnapshotProperties', + 'ResourcePolicyWeeklyCycle', + 'ResourcePolicyWeeklyCycleDayOfWeek', + 
'Route', + 'RouteAsPath', + 'RouteList', + 'Router', + 'RouterAdvertisedIpRange', + 'RouterAggregatedList', + 'RouterBgp', + 'RouterBgpPeer', + 'RouterBgpPeerBfd', + 'RouterInterface', + 'RouterList', + 'RouterNat', + 'RouterNatLogConfig', + 'RouterNatRule', + 'RouterNatRuleAction', + 'RouterNatSubnetworkToNat', + 'RoutersPreviewResponse', + 'RoutersScopedList', + 'RouterStatus', + 'RouterStatusBgpPeerStatus', + 'RouterStatusNatStatus', + 'RouterStatusNatStatusNatRuleStatus', + 'RouterStatusResponse', + 'Rule', + 'ScalingScheduleStatus', + 'Scheduling', + 'SchedulingNodeAffinity', + 'ScratchDisks', + 'Screenshot', + 'SecurityPoliciesListPreconfiguredExpressionSetsResponse', + 'SecurityPoliciesWafConfig', + 'SecurityPolicy', + 'SecurityPolicyAdaptiveProtectionConfig', + 'SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig', + 'SecurityPolicyAdvancedOptionsConfig', + 'SecurityPolicyList', + 'SecurityPolicyReference', + 'SecurityPolicyRule', + 'SecurityPolicyRuleMatcher', + 'SecurityPolicyRuleMatcherConfig', + 'SecuritySettings', + 'SendDiagnosticInterruptInstanceRequest', + 'SendDiagnosticInterruptInstanceResponse', + 'SerialPortOutput', + 'ServerBinding', + 'ServiceAccount', + 'ServiceAttachment', + 'ServiceAttachmentAggregatedList', + 'ServiceAttachmentConnectedEndpoint', + 'ServiceAttachmentConsumerProjectLimit', + 'ServiceAttachmentList', + 'ServiceAttachmentsScopedList', + 'SetBackendServiceTargetSslProxyRequest', + 'SetBackendServiceTargetTcpProxyRequest', + 'SetBackupTargetPoolRequest', + 'SetCommonInstanceMetadataProjectRequest', + 'SetDefaultNetworkTierProjectRequest', + 'SetDeletionProtectionInstanceRequest', + 'SetDiskAutoDeleteInstanceRequest', + 'SetIamPolicyDiskRequest', + 'SetIamPolicyFirewallPolicyRequest', + 'SetIamPolicyImageRequest', + 'SetIamPolicyInstanceRequest', + 'SetIamPolicyInstanceTemplateRequest', + 'SetIamPolicyLicenseRequest', + 'SetIamPolicyNodeGroupRequest', + 'SetIamPolicyNodeTemplateRequest', + 
'SetIamPolicyRegionDiskRequest', + 'SetIamPolicyReservationRequest', + 'SetIamPolicyResourcePolicyRequest', + 'SetIamPolicyServiceAttachmentRequest', + 'SetIamPolicySnapshotRequest', + 'SetIamPolicySubnetworkRequest', + 'SetInstanceTemplateInstanceGroupManagerRequest', + 'SetInstanceTemplateRegionInstanceGroupManagerRequest', + 'SetLabelsDiskRequest', + 'SetLabelsExternalVpnGatewayRequest', + 'SetLabelsForwardingRuleRequest', + 'SetLabelsGlobalForwardingRuleRequest', + 'SetLabelsImageRequest', + 'SetLabelsInstanceRequest', + 'SetLabelsRegionDiskRequest', + 'SetLabelsSnapshotRequest', + 'SetLabelsVpnGatewayRequest', + 'SetMachineResourcesInstanceRequest', + 'SetMachineTypeInstanceRequest', + 'SetMetadataInstanceRequest', + 'SetMinCpuPlatformInstanceRequest', + 'SetNamedPortsInstanceGroupRequest', + 'SetNamedPortsRegionInstanceGroupRequest', + 'SetNodeTemplateNodeGroupRequest', + 'SetPrivateIpGoogleAccessSubnetworkRequest', + 'SetProxyHeaderTargetSslProxyRequest', + 'SetProxyHeaderTargetTcpProxyRequest', + 'SetQuicOverrideTargetHttpsProxyRequest', + 'SetSchedulingInstanceRequest', + 'SetSecurityPolicyBackendServiceRequest', + 'SetServiceAccountInstanceRequest', + 'SetShieldedInstanceIntegrityPolicyInstanceRequest', + 'SetSslCertificatesRegionTargetHttpsProxyRequest', + 'SetSslCertificatesTargetHttpsProxyRequest', + 'SetSslCertificatesTargetSslProxyRequest', + 'SetSslPolicyTargetHttpsProxyRequest', + 'SetSslPolicyTargetSslProxyRequest', + 'SetTagsInstanceRequest', + 'SetTargetForwardingRuleRequest', + 'SetTargetGlobalForwardingRuleRequest', + 'SetTargetPoolsInstanceGroupManagerRequest', + 'SetTargetPoolsRegionInstanceGroupManagerRequest', + 'SetUrlMapRegionTargetHttpProxyRequest', + 'SetUrlMapRegionTargetHttpsProxyRequest', + 'SetUrlMapTargetHttpProxyRequest', + 'SetUrlMapTargetHttpsProxyRequest', + 'SetUsageExportBucketProjectRequest', + 'ShieldedInstanceConfig', + 'ShieldedInstanceIdentity', + 'ShieldedInstanceIdentityEntry', + 'ShieldedInstanceIntegrityPolicy', + 
'SignedUrlKey', + 'SimulateMaintenanceEventInstanceRequest', + 'Snapshot', + 'SnapshotList', + 'SourceInstanceParams', + 'SslCertificate', + 'SslCertificateAggregatedList', + 'SslCertificateList', + 'SslCertificateManagedSslCertificate', + 'SslCertificateSelfManagedSslCertificate', + 'SslCertificatesScopedList', + 'SSLHealthCheck', + 'SslPoliciesList', + 'SslPoliciesListAvailableFeaturesResponse', + 'SslPolicy', + 'SslPolicyReference', + 'StartInstanceRequest', + 'StartWithEncryptionKeyInstanceRequest', + 'StatefulPolicy', + 'StatefulPolicyPreservedState', + 'StatefulPolicyPreservedStateDiskDevice', + 'StopInstanceRequest', + 'Subnetwork', + 'SubnetworkAggregatedList', + 'SubnetworkList', + 'SubnetworkLogConfig', + 'SubnetworkSecondaryRange', + 'SubnetworksExpandIpCidrRangeRequest', + 'SubnetworksScopedList', + 'SubnetworksSetPrivateIpGoogleAccessRequest', + 'Subsetting', + 'SwitchToCustomModeNetworkRequest', + 'Tags', + 'TargetGrpcProxy', + 'TargetGrpcProxyList', + 'TargetHttpProxiesScopedList', + 'TargetHttpProxy', + 'TargetHttpProxyAggregatedList', + 'TargetHttpProxyList', + 'TargetHttpsProxiesScopedList', + 'TargetHttpsProxiesSetQuicOverrideRequest', + 'TargetHttpsProxiesSetSslCertificatesRequest', + 'TargetHttpsProxy', + 'TargetHttpsProxyAggregatedList', + 'TargetHttpsProxyList', + 'TargetInstance', + 'TargetInstanceAggregatedList', + 'TargetInstanceList', + 'TargetInstancesScopedList', + 'TargetPool', + 'TargetPoolAggregatedList', + 'TargetPoolInstanceHealth', + 'TargetPoolList', + 'TargetPoolsAddHealthCheckRequest', + 'TargetPoolsAddInstanceRequest', + 'TargetPoolsRemoveHealthCheckRequest', + 'TargetPoolsRemoveInstanceRequest', + 'TargetPoolsScopedList', + 'TargetReference', + 'TargetSslProxiesSetBackendServiceRequest', + 'TargetSslProxiesSetProxyHeaderRequest', + 'TargetSslProxiesSetSslCertificatesRequest', + 'TargetSslProxy', + 'TargetSslProxyList', + 'TargetTcpProxiesSetBackendServiceRequest', + 'TargetTcpProxiesSetProxyHeaderRequest', + 'TargetTcpProxy', 
+ 'TargetTcpProxyList', + 'TargetVpnGateway', + 'TargetVpnGatewayAggregatedList', + 'TargetVpnGatewayList', + 'TargetVpnGatewaysScopedList', + 'TCPHealthCheck', + 'TestFailure', + 'TestIamPermissionsDiskRequest', + 'TestIamPermissionsExternalVpnGatewayRequest', + 'TestIamPermissionsFirewallPolicyRequest', + 'TestIamPermissionsImageRequest', + 'TestIamPermissionsInstanceRequest', + 'TestIamPermissionsInstanceTemplateRequest', + 'TestIamPermissionsLicenseCodeRequest', + 'TestIamPermissionsLicenseRequest', + 'TestIamPermissionsNetworkEndpointGroupRequest', + 'TestIamPermissionsNodeGroupRequest', + 'TestIamPermissionsNodeTemplateRequest', + 'TestIamPermissionsPacketMirroringRequest', + 'TestIamPermissionsRegionDiskRequest', + 'TestIamPermissionsReservationRequest', + 'TestIamPermissionsResourcePolicyRequest', + 'TestIamPermissionsServiceAttachmentRequest', + 'TestIamPermissionsSnapshotRequest', + 'TestIamPermissionsSubnetworkRequest', + 'TestIamPermissionsVpnGatewayRequest', + 'TestPermissionsRequest', + 'TestPermissionsResponse', + 'Uint128', + 'UpdateAccessConfigInstanceRequest', + 'UpdateAutoscalerRequest', + 'UpdateBackendBucketRequest', + 'UpdateBackendServiceRequest', + 'UpdateDisplayDeviceInstanceRequest', + 'UpdateFirewallRequest', + 'UpdateHealthCheckRequest', + 'UpdateInstanceRequest', + 'UpdateNetworkInterfaceInstanceRequest', + 'UpdatePeeringNetworkRequest', + 'UpdatePerInstanceConfigsInstanceGroupManagerRequest', + 'UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest', + 'UpdateRegionAutoscalerRequest', + 'UpdateRegionBackendServiceRequest', + 'UpdateRegionHealthCheckRequest', + 'UpdateRegionUrlMapRequest', + 'UpdateRouterRequest', + 'UpdateShieldedInstanceConfigInstanceRequest', + 'UpdateUrlMapRequest', + 'UrlMap', + 'UrlMapList', + 'UrlMapReference', + 'UrlMapsAggregatedList', + 'UrlMapsScopedList', + 'UrlMapsValidateRequest', + 'UrlMapsValidateResponse', + 'UrlMapTest', + 'UrlMapTestHeader', + 'UrlMapValidationResult', + 'UrlRewrite', + 
'UsableSubnetwork', + 'UsableSubnetworksAggregatedList', + 'UsableSubnetworkSecondaryRange', + 'UsageExportLocation', + 'ValidateRegionUrlMapRequest', + 'ValidateUrlMapRequest', + 'VmEndpointNatMappings', + 'VmEndpointNatMappingsInterfaceNatMappings', + 'VmEndpointNatMappingsList', + 'VpnGateway', + 'VpnGatewayAggregatedList', + 'VpnGatewayList', + 'VpnGatewaysGetStatusResponse', + 'VpnGatewaysScopedList', + 'VpnGatewayStatus', + 'VpnGatewayStatusHighAvailabilityRequirementState', + 'VpnGatewayStatusTunnel', + 'VpnGatewayStatusVpnConnection', + 'VpnGatewayVpnGatewayInterface', + 'VpnTunnel', + 'VpnTunnelAggregatedList', + 'VpnTunnelList', + 'VpnTunnelsScopedList', + 'WafExpressionSet', + 'WafExpressionSetExpression', + 'WaitGlobalOperationRequest', + 'WaitRegionOperationRequest', + 'WaitZoneOperationRequest', + 'Warning', + 'Warnings', + 'WeightedBackendService', + 'XpnHostList', + 'XpnResourceId', + 'Zone', + 'ZoneList', + 'ZoneSetLabelsRequest', + 'ZoneSetPolicyRequest', +) diff --git a/owl-bot-staging/v1/google/cloud/compute/py.typed b/owl-bot-staging/v1/google/cloud/compute/py.typed new file mode 100644 index 000000000..071da5269 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-compute package uses inline types. diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/__init__.py new file mode 100644 index 000000000..eafe986a3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/__init__.py @@ -0,0 +1,2500 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from .services.accelerator_types import AcceleratorTypesClient +from .services.addresses import AddressesClient +from .services.autoscalers import AutoscalersClient +from .services.backend_buckets import BackendBucketsClient +from .services.backend_services import BackendServicesClient +from .services.disks import DisksClient +from .services.disk_types import DiskTypesClient +from .services.external_vpn_gateways import ExternalVpnGatewaysClient +from .services.firewall_policies import FirewallPoliciesClient +from .services.firewalls import FirewallsClient +from .services.forwarding_rules import ForwardingRulesClient +from .services.global_addresses import GlobalAddressesClient +from .services.global_forwarding_rules import GlobalForwardingRulesClient +from .services.global_network_endpoint_groups import GlobalNetworkEndpointGroupsClient +from .services.global_operations import GlobalOperationsClient +from .services.global_organization_operations import GlobalOrganizationOperationsClient +from .services.global_public_delegated_prefixes import GlobalPublicDelegatedPrefixesClient +from .services.health_checks import HealthChecksClient +from .services.image_family_views import ImageFamilyViewsClient +from .services.images import ImagesClient +from .services.instance_group_managers import InstanceGroupManagersClient +from .services.instance_groups import InstanceGroupsClient +from .services.instances import InstancesClient +from .services.instance_templates import InstanceTemplatesClient +from .services.interconnect_attachments import 
InterconnectAttachmentsClient +from .services.interconnect_locations import InterconnectLocationsClient +from .services.interconnects import InterconnectsClient +from .services.license_codes import LicenseCodesClient +from .services.licenses import LicensesClient +from .services.machine_types import MachineTypesClient +from .services.network_endpoint_groups import NetworkEndpointGroupsClient +from .services.networks import NetworksClient +from .services.node_groups import NodeGroupsClient +from .services.node_templates import NodeTemplatesClient +from .services.node_types import NodeTypesClient +from .services.packet_mirrorings import PacketMirroringsClient +from .services.projects import ProjectsClient +from .services.public_advertised_prefixes import PublicAdvertisedPrefixesClient +from .services.public_delegated_prefixes import PublicDelegatedPrefixesClient +from .services.region_autoscalers import RegionAutoscalersClient +from .services.region_backend_services import RegionBackendServicesClient +from .services.region_commitments import RegionCommitmentsClient +from .services.region_disks import RegionDisksClient +from .services.region_disk_types import RegionDiskTypesClient +from .services.region_health_checks import RegionHealthChecksClient +from .services.region_health_check_services import RegionHealthCheckServicesClient +from .services.region_instance_group_managers import RegionInstanceGroupManagersClient +from .services.region_instance_groups import RegionInstanceGroupsClient +from .services.region_instances import RegionInstancesClient +from .services.region_network_endpoint_groups import RegionNetworkEndpointGroupsClient +from .services.region_notification_endpoints import RegionNotificationEndpointsClient +from .services.region_operations import RegionOperationsClient +from .services.regions import RegionsClient +from .services.region_ssl_certificates import RegionSslCertificatesClient +from .services.region_target_http_proxies import 
RegionTargetHttpProxiesClient +from .services.region_target_https_proxies import RegionTargetHttpsProxiesClient +from .services.region_url_maps import RegionUrlMapsClient +from .services.reservations import ReservationsClient +from .services.resource_policies import ResourcePoliciesClient +from .services.routers import RoutersClient +from .services.routes import RoutesClient +from .services.security_policies import SecurityPoliciesClient +from .services.service_attachments import ServiceAttachmentsClient +from .services.snapshots import SnapshotsClient +from .services.ssl_certificates import SslCertificatesClient +from .services.ssl_policies import SslPoliciesClient +from .services.subnetworks import SubnetworksClient +from .services.target_grpc_proxies import TargetGrpcProxiesClient +from .services.target_http_proxies import TargetHttpProxiesClient +from .services.target_https_proxies import TargetHttpsProxiesClient +from .services.target_instances import TargetInstancesClient +from .services.target_pools import TargetPoolsClient +from .services.target_ssl_proxies import TargetSslProxiesClient +from .services.target_tcp_proxies import TargetTcpProxiesClient +from .services.target_vpn_gateways import TargetVpnGatewaysClient +from .services.url_maps import UrlMapsClient +from .services.vpn_gateways import VpnGatewaysClient +from .services.vpn_tunnels import VpnTunnelsClient +from .services.zone_operations import ZoneOperationsClient +from .services.zones import ZonesClient + +from .types.compute import AbandonInstancesInstanceGroupManagerRequest +from .types.compute import AbandonInstancesRegionInstanceGroupManagerRequest +from .types.compute import AcceleratorConfig +from .types.compute import Accelerators +from .types.compute import AcceleratorType +from .types.compute import AcceleratorTypeAggregatedList +from .types.compute import AcceleratorTypeList +from .types.compute import AcceleratorTypesScopedList +from .types.compute import AccessConfig +from 
.types.compute import AddAccessConfigInstanceRequest +from .types.compute import AddAssociationFirewallPolicyRequest +from .types.compute import AddHealthCheckTargetPoolRequest +from .types.compute import AddInstancesInstanceGroupRequest +from .types.compute import AddInstanceTargetPoolRequest +from .types.compute import AddNodesNodeGroupRequest +from .types.compute import AddPeeringNetworkRequest +from .types.compute import AddResourcePoliciesDiskRequest +from .types.compute import AddResourcePoliciesInstanceRequest +from .types.compute import AddResourcePoliciesRegionDiskRequest +from .types.compute import Address +from .types.compute import AddressAggregatedList +from .types.compute import AddressesScopedList +from .types.compute import AddressList +from .types.compute import AddRuleFirewallPolicyRequest +from .types.compute import AddRuleSecurityPolicyRequest +from .types.compute import AddSignedUrlKeyBackendBucketRequest +from .types.compute import AddSignedUrlKeyBackendServiceRequest +from .types.compute import AdvancedMachineFeatures +from .types.compute import AggregatedListAcceleratorTypesRequest +from .types.compute import AggregatedListAddressesRequest +from .types.compute import AggregatedListAutoscalersRequest +from .types.compute import AggregatedListBackendServicesRequest +from .types.compute import AggregatedListDisksRequest +from .types.compute import AggregatedListDiskTypesRequest +from .types.compute import AggregatedListForwardingRulesRequest +from .types.compute import AggregatedListGlobalOperationsRequest +from .types.compute import AggregatedListHealthChecksRequest +from .types.compute import AggregatedListInstanceGroupManagersRequest +from .types.compute import AggregatedListInstanceGroupsRequest +from .types.compute import AggregatedListInstancesRequest +from .types.compute import AggregatedListInterconnectAttachmentsRequest +from .types.compute import AggregatedListMachineTypesRequest +from .types.compute import 
AggregatedListNetworkEndpointGroupsRequest +from .types.compute import AggregatedListNodeGroupsRequest +from .types.compute import AggregatedListNodeTemplatesRequest +from .types.compute import AggregatedListNodeTypesRequest +from .types.compute import AggregatedListPacketMirroringsRequest +from .types.compute import AggregatedListPublicDelegatedPrefixesRequest +from .types.compute import AggregatedListRegionCommitmentsRequest +from .types.compute import AggregatedListReservationsRequest +from .types.compute import AggregatedListResourcePoliciesRequest +from .types.compute import AggregatedListRoutersRequest +from .types.compute import AggregatedListServiceAttachmentsRequest +from .types.compute import AggregatedListSslCertificatesRequest +from .types.compute import AggregatedListSubnetworksRequest +from .types.compute import AggregatedListTargetHttpProxiesRequest +from .types.compute import AggregatedListTargetHttpsProxiesRequest +from .types.compute import AggregatedListTargetInstancesRequest +from .types.compute import AggregatedListTargetPoolsRequest +from .types.compute import AggregatedListTargetVpnGatewaysRequest +from .types.compute import AggregatedListUrlMapsRequest +from .types.compute import AggregatedListVpnGatewaysRequest +from .types.compute import AggregatedListVpnTunnelsRequest +from .types.compute import AliasIpRange +from .types.compute import AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk +from .types.compute import AllocationSpecificSKUAllocationReservedInstanceProperties +from .types.compute import AllocationSpecificSKUReservation +from .types.compute import Allowed +from .types.compute import ApplyUpdatesToInstancesInstanceGroupManagerRequest +from .types.compute import ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest +from .types.compute import AttachDiskInstanceRequest +from .types.compute import AttachedDisk +from .types.compute import AttachedDiskInitializeParams +from .types.compute import 
AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest +from .types.compute import AttachNetworkEndpointsNetworkEndpointGroupRequest +from .types.compute import AuditConfig +from .types.compute import AuditLogConfig +from .types.compute import AuthorizationLoggingOptions +from .types.compute import Autoscaler +from .types.compute import AutoscalerAggregatedList +from .types.compute import AutoscalerList +from .types.compute import AutoscalersScopedList +from .types.compute import AutoscalerStatusDetails +from .types.compute import AutoscalingPolicy +from .types.compute import AutoscalingPolicyCpuUtilization +from .types.compute import AutoscalingPolicyCustomMetricUtilization +from .types.compute import AutoscalingPolicyLoadBalancingUtilization +from .types.compute import AutoscalingPolicyScaleInControl +from .types.compute import AutoscalingPolicyScalingSchedule +from .types.compute import Backend +from .types.compute import BackendBucket +from .types.compute import BackendBucketCdnPolicy +from .types.compute import BackendBucketCdnPolicyBypassCacheOnRequestHeader +from .types.compute import BackendBucketCdnPolicyNegativeCachingPolicy +from .types.compute import BackendBucketList +from .types.compute import BackendService +from .types.compute import BackendServiceAggregatedList +from .types.compute import BackendServiceCdnPolicy +from .types.compute import BackendServiceCdnPolicyBypassCacheOnRequestHeader +from .types.compute import BackendServiceCdnPolicyNegativeCachingPolicy +from .types.compute import BackendServiceFailoverPolicy +from .types.compute import BackendServiceGroupHealth +from .types.compute import BackendServiceIAP +from .types.compute import BackendServiceList +from .types.compute import BackendServiceLogConfig +from .types.compute import BackendServiceReference +from .types.compute import BackendServicesScopedList +from .types.compute import Binding +from .types.compute import BulkInsertInstanceRequest +from .types.compute import 
BulkInsertInstanceResource +from .types.compute import BulkInsertInstanceResourcePerInstanceProperties +from .types.compute import BulkInsertRegionInstanceRequest +from .types.compute import CacheInvalidationRule +from .types.compute import CacheKeyPolicy +from .types.compute import CircuitBreakers +from .types.compute import CloneRulesFirewallPolicyRequest +from .types.compute import Commitment +from .types.compute import CommitmentAggregatedList +from .types.compute import CommitmentList +from .types.compute import CommitmentsScopedList +from .types.compute import Condition +from .types.compute import ConfidentialInstanceConfig +from .types.compute import ConnectionDraining +from .types.compute import ConsistentHashLoadBalancerSettings +from .types.compute import ConsistentHashLoadBalancerSettingsHttpCookie +from .types.compute import CorsPolicy +from .types.compute import CreateInstancesInstanceGroupManagerRequest +from .types.compute import CreateInstancesRegionInstanceGroupManagerRequest +from .types.compute import CreateSnapshotDiskRequest +from .types.compute import CreateSnapshotRegionDiskRequest +from .types.compute import CustomerEncryptionKey +from .types.compute import CustomerEncryptionKeyProtectedDisk +from .types.compute import Data +from .types.compute import DeleteAccessConfigInstanceRequest +from .types.compute import DeleteAddressRequest +from .types.compute import DeleteAutoscalerRequest +from .types.compute import DeleteBackendBucketRequest +from .types.compute import DeleteBackendServiceRequest +from .types.compute import DeleteDiskRequest +from .types.compute import DeleteExternalVpnGatewayRequest +from .types.compute import DeleteFirewallPolicyRequest +from .types.compute import DeleteFirewallRequest +from .types.compute import DeleteForwardingRuleRequest +from .types.compute import DeleteGlobalAddressRequest +from .types.compute import DeleteGlobalForwardingRuleRequest +from .types.compute import DeleteGlobalNetworkEndpointGroupRequest 
+from .types.compute import DeleteGlobalOperationRequest +from .types.compute import DeleteGlobalOperationResponse +from .types.compute import DeleteGlobalOrganizationOperationRequest +from .types.compute import DeleteGlobalOrganizationOperationResponse +from .types.compute import DeleteGlobalPublicDelegatedPrefixeRequest +from .types.compute import DeleteHealthCheckRequest +from .types.compute import DeleteImageRequest +from .types.compute import DeleteInstanceGroupManagerRequest +from .types.compute import DeleteInstanceGroupRequest +from .types.compute import DeleteInstanceRequest +from .types.compute import DeleteInstancesInstanceGroupManagerRequest +from .types.compute import DeleteInstancesRegionInstanceGroupManagerRequest +from .types.compute import DeleteInstanceTemplateRequest +from .types.compute import DeleteInterconnectAttachmentRequest +from .types.compute import DeleteInterconnectRequest +from .types.compute import DeleteLicenseRequest +from .types.compute import DeleteNetworkEndpointGroupRequest +from .types.compute import DeleteNetworkRequest +from .types.compute import DeleteNodeGroupRequest +from .types.compute import DeleteNodesNodeGroupRequest +from .types.compute import DeleteNodeTemplateRequest +from .types.compute import DeletePacketMirroringRequest +from .types.compute import DeletePerInstanceConfigsInstanceGroupManagerRequest +from .types.compute import DeletePerInstanceConfigsRegionInstanceGroupManagerRequest +from .types.compute import DeletePublicAdvertisedPrefixeRequest +from .types.compute import DeletePublicDelegatedPrefixeRequest +from .types.compute import DeleteRegionAutoscalerRequest +from .types.compute import DeleteRegionBackendServiceRequest +from .types.compute import DeleteRegionDiskRequest +from .types.compute import DeleteRegionHealthCheckRequest +from .types.compute import DeleteRegionHealthCheckServiceRequest +from .types.compute import DeleteRegionInstanceGroupManagerRequest +from .types.compute import 
DeleteRegionNetworkEndpointGroupRequest +from .types.compute import DeleteRegionNotificationEndpointRequest +from .types.compute import DeleteRegionOperationRequest +from .types.compute import DeleteRegionOperationResponse +from .types.compute import DeleteRegionSslCertificateRequest +from .types.compute import DeleteRegionTargetHttpProxyRequest +from .types.compute import DeleteRegionTargetHttpsProxyRequest +from .types.compute import DeleteRegionUrlMapRequest +from .types.compute import DeleteReservationRequest +from .types.compute import DeleteResourcePolicyRequest +from .types.compute import DeleteRouteRequest +from .types.compute import DeleteRouterRequest +from .types.compute import DeleteSecurityPolicyRequest +from .types.compute import DeleteServiceAttachmentRequest +from .types.compute import DeleteSignedUrlKeyBackendBucketRequest +from .types.compute import DeleteSignedUrlKeyBackendServiceRequest +from .types.compute import DeleteSnapshotRequest +from .types.compute import DeleteSslCertificateRequest +from .types.compute import DeleteSslPolicyRequest +from .types.compute import DeleteSubnetworkRequest +from .types.compute import DeleteTargetGrpcProxyRequest +from .types.compute import DeleteTargetHttpProxyRequest +from .types.compute import DeleteTargetHttpsProxyRequest +from .types.compute import DeleteTargetInstanceRequest +from .types.compute import DeleteTargetPoolRequest +from .types.compute import DeleteTargetSslProxyRequest +from .types.compute import DeleteTargetTcpProxyRequest +from .types.compute import DeleteTargetVpnGatewayRequest +from .types.compute import DeleteUrlMapRequest +from .types.compute import DeleteVpnGatewayRequest +from .types.compute import DeleteVpnTunnelRequest +from .types.compute import DeleteZoneOperationRequest +from .types.compute import DeleteZoneOperationResponse +from .types.compute import Denied +from .types.compute import DeprecateImageRequest +from .types.compute import DeprecationStatus +from .types.compute import 
DetachDiskInstanceRequest +from .types.compute import DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest +from .types.compute import DetachNetworkEndpointsNetworkEndpointGroupRequest +from .types.compute import DisableXpnHostProjectRequest +from .types.compute import DisableXpnResourceProjectRequest +from .types.compute import Disk +from .types.compute import DiskAggregatedList +from .types.compute import DiskInstantiationConfig +from .types.compute import DiskList +from .types.compute import DiskMoveRequest +from .types.compute import DisksAddResourcePoliciesRequest +from .types.compute import DisksRemoveResourcePoliciesRequest +from .types.compute import DisksResizeRequest +from .types.compute import DisksScopedList +from .types.compute import DiskType +from .types.compute import DiskTypeAggregatedList +from .types.compute import DiskTypeList +from .types.compute import DiskTypesScopedList +from .types.compute import DisplayDevice +from .types.compute import DistributionPolicy +from .types.compute import DistributionPolicyZoneConfiguration +from .types.compute import Duration +from .types.compute import EnableXpnHostProjectRequest +from .types.compute import EnableXpnResourceProjectRequest +from .types.compute import Error +from .types.compute import Errors +from .types.compute import ExchangedPeeringRoute +from .types.compute import ExchangedPeeringRoutesList +from .types.compute import ExpandIpCidrRangeSubnetworkRequest +from .types.compute import Expr +from .types.compute import ExternalVpnGateway +from .types.compute import ExternalVpnGatewayInterface +from .types.compute import ExternalVpnGatewayList +from .types.compute import FileContentBuffer +from .types.compute import Firewall +from .types.compute import FirewallList +from .types.compute import FirewallLogConfig +from .types.compute import FirewallPoliciesListAssociationsResponse +from .types.compute import FirewallPolicy +from .types.compute import FirewallPolicyAssociation +from .types.compute 
import FirewallPolicyList +from .types.compute import FirewallPolicyRule +from .types.compute import FirewallPolicyRuleMatcher +from .types.compute import FirewallPolicyRuleMatcherLayer4Config +from .types.compute import FixedOrPercent +from .types.compute import ForwardingRule +from .types.compute import ForwardingRuleAggregatedList +from .types.compute import ForwardingRuleList +from .types.compute import ForwardingRuleReference +from .types.compute import ForwardingRuleServiceDirectoryRegistration +from .types.compute import ForwardingRulesScopedList +from .types.compute import GetAcceleratorTypeRequest +from .types.compute import GetAddressRequest +from .types.compute import GetAssociationFirewallPolicyRequest +from .types.compute import GetAutoscalerRequest +from .types.compute import GetBackendBucketRequest +from .types.compute import GetBackendServiceRequest +from .types.compute import GetDiagnosticsInterconnectRequest +from .types.compute import GetDiskRequest +from .types.compute import GetDiskTypeRequest +from .types.compute import GetEffectiveFirewallsInstanceRequest +from .types.compute import GetEffectiveFirewallsNetworkRequest +from .types.compute import GetExternalVpnGatewayRequest +from .types.compute import GetFirewallPolicyRequest +from .types.compute import GetFirewallRequest +from .types.compute import GetForwardingRuleRequest +from .types.compute import GetFromFamilyImageRequest +from .types.compute import GetGlobalAddressRequest +from .types.compute import GetGlobalForwardingRuleRequest +from .types.compute import GetGlobalNetworkEndpointGroupRequest +from .types.compute import GetGlobalOperationRequest +from .types.compute import GetGlobalOrganizationOperationRequest +from .types.compute import GetGlobalPublicDelegatedPrefixeRequest +from .types.compute import GetGuestAttributesInstanceRequest +from .types.compute import GetHealthBackendServiceRequest +from .types.compute import GetHealthCheckRequest +from .types.compute import 
GetHealthRegionBackendServiceRequest +from .types.compute import GetHealthTargetPoolRequest +from .types.compute import GetIamPolicyDiskRequest +from .types.compute import GetIamPolicyFirewallPolicyRequest +from .types.compute import GetIamPolicyImageRequest +from .types.compute import GetIamPolicyInstanceRequest +from .types.compute import GetIamPolicyInstanceTemplateRequest +from .types.compute import GetIamPolicyLicenseRequest +from .types.compute import GetIamPolicyNodeGroupRequest +from .types.compute import GetIamPolicyNodeTemplateRequest +from .types.compute import GetIamPolicyRegionDiskRequest +from .types.compute import GetIamPolicyReservationRequest +from .types.compute import GetIamPolicyResourcePolicyRequest +from .types.compute import GetIamPolicyServiceAttachmentRequest +from .types.compute import GetIamPolicySnapshotRequest +from .types.compute import GetIamPolicySubnetworkRequest +from .types.compute import GetImageFamilyViewRequest +from .types.compute import GetImageRequest +from .types.compute import GetInstanceGroupManagerRequest +from .types.compute import GetInstanceGroupRequest +from .types.compute import GetInstanceRequest +from .types.compute import GetInstanceTemplateRequest +from .types.compute import GetInterconnectAttachmentRequest +from .types.compute import GetInterconnectLocationRequest +from .types.compute import GetInterconnectRequest +from .types.compute import GetLicenseCodeRequest +from .types.compute import GetLicenseRequest +from .types.compute import GetMachineTypeRequest +from .types.compute import GetNatMappingInfoRoutersRequest +from .types.compute import GetNetworkEndpointGroupRequest +from .types.compute import GetNetworkRequest +from .types.compute import GetNodeGroupRequest +from .types.compute import GetNodeTemplateRequest +from .types.compute import GetNodeTypeRequest +from .types.compute import GetPacketMirroringRequest +from .types.compute import GetProjectRequest +from .types.compute import 
GetPublicAdvertisedPrefixeRequest +from .types.compute import GetPublicDelegatedPrefixeRequest +from .types.compute import GetRegionAutoscalerRequest +from .types.compute import GetRegionBackendServiceRequest +from .types.compute import GetRegionCommitmentRequest +from .types.compute import GetRegionDiskRequest +from .types.compute import GetRegionDiskTypeRequest +from .types.compute import GetRegionHealthCheckRequest +from .types.compute import GetRegionHealthCheckServiceRequest +from .types.compute import GetRegionInstanceGroupManagerRequest +from .types.compute import GetRegionInstanceGroupRequest +from .types.compute import GetRegionNetworkEndpointGroupRequest +from .types.compute import GetRegionNotificationEndpointRequest +from .types.compute import GetRegionOperationRequest +from .types.compute import GetRegionRequest +from .types.compute import GetRegionSslCertificateRequest +from .types.compute import GetRegionTargetHttpProxyRequest +from .types.compute import GetRegionTargetHttpsProxyRequest +from .types.compute import GetRegionUrlMapRequest +from .types.compute import GetReservationRequest +from .types.compute import GetResourcePolicyRequest +from .types.compute import GetRouteRequest +from .types.compute import GetRouterRequest +from .types.compute import GetRouterStatusRouterRequest +from .types.compute import GetRuleFirewallPolicyRequest +from .types.compute import GetRuleSecurityPolicyRequest +from .types.compute import GetScreenshotInstanceRequest +from .types.compute import GetSecurityPolicyRequest +from .types.compute import GetSerialPortOutputInstanceRequest +from .types.compute import GetServiceAttachmentRequest +from .types.compute import GetShieldedInstanceIdentityInstanceRequest +from .types.compute import GetSnapshotRequest +from .types.compute import GetSslCertificateRequest +from .types.compute import GetSslPolicyRequest +from .types.compute import GetStatusVpnGatewayRequest +from .types.compute import GetSubnetworkRequest +from 
.types.compute import GetTargetGrpcProxyRequest +from .types.compute import GetTargetHttpProxyRequest +from .types.compute import GetTargetHttpsProxyRequest +from .types.compute import GetTargetInstanceRequest +from .types.compute import GetTargetPoolRequest +from .types.compute import GetTargetSslProxyRequest +from .types.compute import GetTargetTcpProxyRequest +from .types.compute import GetTargetVpnGatewayRequest +from .types.compute import GetUrlMapRequest +from .types.compute import GetVpnGatewayRequest +from .types.compute import GetVpnTunnelRequest +from .types.compute import GetXpnHostProjectRequest +from .types.compute import GetXpnResourcesProjectsRequest +from .types.compute import GetZoneOperationRequest +from .types.compute import GetZoneRequest +from .types.compute import GlobalNetworkEndpointGroupsAttachEndpointsRequest +from .types.compute import GlobalNetworkEndpointGroupsDetachEndpointsRequest +from .types.compute import GlobalOrganizationSetPolicyRequest +from .types.compute import GlobalSetLabelsRequest +from .types.compute import GlobalSetPolicyRequest +from .types.compute import GRPCHealthCheck +from .types.compute import GuestAttributes +from .types.compute import GuestAttributesEntry +from .types.compute import GuestAttributesValue +from .types.compute import GuestOsFeature +from .types.compute import HealthCheck +from .types.compute import HealthCheckList +from .types.compute import HealthCheckLogConfig +from .types.compute import HealthCheckReference +from .types.compute import HealthChecksAggregatedList +from .types.compute import HealthCheckService +from .types.compute import HealthCheckServiceReference +from .types.compute import HealthCheckServicesList +from .types.compute import HealthChecksScopedList +from .types.compute import HealthStatus +from .types.compute import HealthStatusForNetworkEndpoint +from .types.compute import HostRule +from .types.compute import HTTP2HealthCheck +from .types.compute import HttpFaultAbort +from 
.types.compute import HttpFaultDelay +from .types.compute import HttpFaultInjection +from .types.compute import HttpHeaderAction +from .types.compute import HttpHeaderMatch +from .types.compute import HttpHeaderOption +from .types.compute import HTTPHealthCheck +from .types.compute import HttpQueryParameterMatch +from .types.compute import HttpRedirectAction +from .types.compute import HttpRetryPolicy +from .types.compute import HttpRouteAction +from .types.compute import HttpRouteRule +from .types.compute import HttpRouteRuleMatch +from .types.compute import HTTPSHealthCheck +from .types.compute import Image +from .types.compute import ImageFamilyView +from .types.compute import ImageList +from .types.compute import InitialStateConfig +from .types.compute import InsertAddressRequest +from .types.compute import InsertAutoscalerRequest +from .types.compute import InsertBackendBucketRequest +from .types.compute import InsertBackendServiceRequest +from .types.compute import InsertDiskRequest +from .types.compute import InsertExternalVpnGatewayRequest +from .types.compute import InsertFirewallPolicyRequest +from .types.compute import InsertFirewallRequest +from .types.compute import InsertForwardingRuleRequest +from .types.compute import InsertGlobalAddressRequest +from .types.compute import InsertGlobalForwardingRuleRequest +from .types.compute import InsertGlobalNetworkEndpointGroupRequest +from .types.compute import InsertGlobalPublicDelegatedPrefixeRequest +from .types.compute import InsertHealthCheckRequest +from .types.compute import InsertImageRequest +from .types.compute import InsertInstanceGroupManagerRequest +from .types.compute import InsertInstanceGroupRequest +from .types.compute import InsertInstanceRequest +from .types.compute import InsertInstanceTemplateRequest +from .types.compute import InsertInterconnectAttachmentRequest +from .types.compute import InsertInterconnectRequest +from .types.compute import InsertLicenseRequest +from .types.compute 
import InsertNetworkEndpointGroupRequest +from .types.compute import InsertNetworkRequest +from .types.compute import InsertNodeGroupRequest +from .types.compute import InsertNodeTemplateRequest +from .types.compute import InsertPacketMirroringRequest +from .types.compute import InsertPublicAdvertisedPrefixeRequest +from .types.compute import InsertPublicDelegatedPrefixeRequest +from .types.compute import InsertRegionAutoscalerRequest +from .types.compute import InsertRegionBackendServiceRequest +from .types.compute import InsertRegionCommitmentRequest +from .types.compute import InsertRegionDiskRequest +from .types.compute import InsertRegionHealthCheckRequest +from .types.compute import InsertRegionHealthCheckServiceRequest +from .types.compute import InsertRegionInstanceGroupManagerRequest +from .types.compute import InsertRegionNetworkEndpointGroupRequest +from .types.compute import InsertRegionNotificationEndpointRequest +from .types.compute import InsertRegionSslCertificateRequest +from .types.compute import InsertRegionTargetHttpProxyRequest +from .types.compute import InsertRegionTargetHttpsProxyRequest +from .types.compute import InsertRegionUrlMapRequest +from .types.compute import InsertReservationRequest +from .types.compute import InsertResourcePolicyRequest +from .types.compute import InsertRouteRequest +from .types.compute import InsertRouterRequest +from .types.compute import InsertSecurityPolicyRequest +from .types.compute import InsertServiceAttachmentRequest +from .types.compute import InsertSslCertificateRequest +from .types.compute import InsertSslPolicyRequest +from .types.compute import InsertSubnetworkRequest +from .types.compute import InsertTargetGrpcProxyRequest +from .types.compute import InsertTargetHttpProxyRequest +from .types.compute import InsertTargetHttpsProxyRequest +from .types.compute import InsertTargetInstanceRequest +from .types.compute import InsertTargetPoolRequest +from .types.compute import InsertTargetSslProxyRequest 
+from .types.compute import InsertTargetTcpProxyRequest +from .types.compute import InsertTargetVpnGatewayRequest +from .types.compute import InsertUrlMapRequest +from .types.compute import InsertVpnGatewayRequest +from .types.compute import InsertVpnTunnelRequest +from .types.compute import Instance +from .types.compute import InstanceAggregatedList +from .types.compute import InstanceGroup +from .types.compute import InstanceGroupAggregatedList +from .types.compute import InstanceGroupList +from .types.compute import InstanceGroupManager +from .types.compute import InstanceGroupManagerActionsSummary +from .types.compute import InstanceGroupManagerAggregatedList +from .types.compute import InstanceGroupManagerAutoHealingPolicy +from .types.compute import InstanceGroupManagerList +from .types.compute import InstanceGroupManagersAbandonInstancesRequest +from .types.compute import InstanceGroupManagersApplyUpdatesRequest +from .types.compute import InstanceGroupManagersCreateInstancesRequest +from .types.compute import InstanceGroupManagersDeleteInstancesRequest +from .types.compute import InstanceGroupManagersDeletePerInstanceConfigsReq +from .types.compute import InstanceGroupManagersListErrorsResponse +from .types.compute import InstanceGroupManagersListManagedInstancesResponse +from .types.compute import InstanceGroupManagersListPerInstanceConfigsResp +from .types.compute import InstanceGroupManagersPatchPerInstanceConfigsReq +from .types.compute import InstanceGroupManagersRecreateInstancesRequest +from .types.compute import InstanceGroupManagersScopedList +from .types.compute import InstanceGroupManagersSetInstanceTemplateRequest +from .types.compute import InstanceGroupManagersSetTargetPoolsRequest +from .types.compute import InstanceGroupManagerStatus +from .types.compute import InstanceGroupManagerStatusStateful +from .types.compute import InstanceGroupManagerStatusStatefulPerInstanceConfigs +from .types.compute import InstanceGroupManagerStatusVersionTarget 
+from .types.compute import InstanceGroupManagersUpdatePerInstanceConfigsReq +from .types.compute import InstanceGroupManagerUpdatePolicy +from .types.compute import InstanceGroupManagerVersion +from .types.compute import InstanceGroupsAddInstancesRequest +from .types.compute import InstanceGroupsListInstances +from .types.compute import InstanceGroupsListInstancesRequest +from .types.compute import InstanceGroupsRemoveInstancesRequest +from .types.compute import InstanceGroupsScopedList +from .types.compute import InstanceGroupsSetNamedPortsRequest +from .types.compute import InstanceList +from .types.compute import InstanceListReferrers +from .types.compute import InstanceManagedByIgmError +from .types.compute import InstanceManagedByIgmErrorInstanceActionDetails +from .types.compute import InstanceManagedByIgmErrorManagedInstanceError +from .types.compute import InstanceMoveRequest +from .types.compute import InstanceProperties +from .types.compute import InstanceReference +from .types.compute import InstancesAddResourcePoliciesRequest +from .types.compute import InstancesGetEffectiveFirewallsResponse +from .types.compute import InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy +from .types.compute import InstancesRemoveResourcePoliciesRequest +from .types.compute import InstancesScopedList +from .types.compute import InstancesSetLabelsRequest +from .types.compute import InstancesSetMachineResourcesRequest +from .types.compute import InstancesSetMachineTypeRequest +from .types.compute import InstancesSetMinCpuPlatformRequest +from .types.compute import InstancesSetServiceAccountRequest +from .types.compute import InstancesStartWithEncryptionKeyRequest +from .types.compute import InstanceTemplate +from .types.compute import InstanceTemplateList +from .types.compute import InstanceWithNamedPorts +from .types.compute import Int64RangeMatch +from .types.compute import Interconnect +from .types.compute import InterconnectAttachment +from .types.compute 
import InterconnectAttachmentAggregatedList +from .types.compute import InterconnectAttachmentList +from .types.compute import InterconnectAttachmentPartnerMetadata +from .types.compute import InterconnectAttachmentPrivateInfo +from .types.compute import InterconnectAttachmentsScopedList +from .types.compute import InterconnectCircuitInfo +from .types.compute import InterconnectDiagnostics +from .types.compute import InterconnectDiagnosticsARPEntry +from .types.compute import InterconnectDiagnosticsLinkLACPStatus +from .types.compute import InterconnectDiagnosticsLinkOpticalPower +from .types.compute import InterconnectDiagnosticsLinkStatus +from .types.compute import InterconnectList +from .types.compute import InterconnectLocation +from .types.compute import InterconnectLocationList +from .types.compute import InterconnectLocationRegionInfo +from .types.compute import InterconnectOutageNotification +from .types.compute import InterconnectsGetDiagnosticsResponse +from .types.compute import InvalidateCacheUrlMapRequest +from .types.compute import Items +from .types.compute import License +from .types.compute import LicenseCode +from .types.compute import LicenseCodeLicenseAlias +from .types.compute import LicenseResourceCommitment +from .types.compute import LicenseResourceRequirements +from .types.compute import LicensesListResponse +from .types.compute import ListAcceleratorTypesRequest +from .types.compute import ListAddressesRequest +from .types.compute import ListAssociationsFirewallPolicyRequest +from .types.compute import ListAutoscalersRequest +from .types.compute import ListAvailableFeaturesSslPoliciesRequest +from .types.compute import ListBackendBucketsRequest +from .types.compute import ListBackendServicesRequest +from .types.compute import ListDisksRequest +from .types.compute import ListDiskTypesRequest +from .types.compute import ListErrorsInstanceGroupManagersRequest +from .types.compute import ListErrorsRegionInstanceGroupManagersRequest +from 
.types.compute import ListExternalVpnGatewaysRequest +from .types.compute import ListFirewallPoliciesRequest +from .types.compute import ListFirewallsRequest +from .types.compute import ListForwardingRulesRequest +from .types.compute import ListGlobalAddressesRequest +from .types.compute import ListGlobalForwardingRulesRequest +from .types.compute import ListGlobalNetworkEndpointGroupsRequest +from .types.compute import ListGlobalOperationsRequest +from .types.compute import ListGlobalOrganizationOperationsRequest +from .types.compute import ListGlobalPublicDelegatedPrefixesRequest +from .types.compute import ListHealthChecksRequest +from .types.compute import ListImagesRequest +from .types.compute import ListInstanceGroupManagersRequest +from .types.compute import ListInstanceGroupsRequest +from .types.compute import ListInstancesInstanceGroupsRequest +from .types.compute import ListInstancesRegionInstanceGroupsRequest +from .types.compute import ListInstancesRequest +from .types.compute import ListInstanceTemplatesRequest +from .types.compute import ListInterconnectAttachmentsRequest +from .types.compute import ListInterconnectLocationsRequest +from .types.compute import ListInterconnectsRequest +from .types.compute import ListLicensesRequest +from .types.compute import ListMachineTypesRequest +from .types.compute import ListManagedInstancesInstanceGroupManagersRequest +from .types.compute import ListManagedInstancesRegionInstanceGroupManagersRequest +from .types.compute import ListNetworkEndpointGroupsRequest +from .types.compute import ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest +from .types.compute import ListNetworkEndpointsNetworkEndpointGroupsRequest +from .types.compute import ListNetworksRequest +from .types.compute import ListNodeGroupsRequest +from .types.compute import ListNodesNodeGroupsRequest +from .types.compute import ListNodeTemplatesRequest +from .types.compute import ListNodeTypesRequest +from .types.compute import 
ListPacketMirroringsRequest +from .types.compute import ListPeeringRoutesNetworksRequest +from .types.compute import ListPerInstanceConfigsInstanceGroupManagersRequest +from .types.compute import ListPerInstanceConfigsRegionInstanceGroupManagersRequest +from .types.compute import ListPreconfiguredExpressionSetsSecurityPoliciesRequest +from .types.compute import ListPublicAdvertisedPrefixesRequest +from .types.compute import ListPublicDelegatedPrefixesRequest +from .types.compute import ListReferrersInstancesRequest +from .types.compute import ListRegionAutoscalersRequest +from .types.compute import ListRegionBackendServicesRequest +from .types.compute import ListRegionCommitmentsRequest +from .types.compute import ListRegionDisksRequest +from .types.compute import ListRegionDiskTypesRequest +from .types.compute import ListRegionHealthCheckServicesRequest +from .types.compute import ListRegionHealthChecksRequest +from .types.compute import ListRegionInstanceGroupManagersRequest +from .types.compute import ListRegionInstanceGroupsRequest +from .types.compute import ListRegionNetworkEndpointGroupsRequest +from .types.compute import ListRegionNotificationEndpointsRequest +from .types.compute import ListRegionOperationsRequest +from .types.compute import ListRegionsRequest +from .types.compute import ListRegionSslCertificatesRequest +from .types.compute import ListRegionTargetHttpProxiesRequest +from .types.compute import ListRegionTargetHttpsProxiesRequest +from .types.compute import ListRegionUrlMapsRequest +from .types.compute import ListReservationsRequest +from .types.compute import ListResourcePoliciesRequest +from .types.compute import ListRoutersRequest +from .types.compute import ListRoutesRequest +from .types.compute import ListSecurityPoliciesRequest +from .types.compute import ListServiceAttachmentsRequest +from .types.compute import ListSnapshotsRequest +from .types.compute import ListSslCertificatesRequest +from .types.compute import ListSslPoliciesRequest 
+from .types.compute import ListSubnetworksRequest +from .types.compute import ListTargetGrpcProxiesRequest +from .types.compute import ListTargetHttpProxiesRequest +from .types.compute import ListTargetHttpsProxiesRequest +from .types.compute import ListTargetInstancesRequest +from .types.compute import ListTargetPoolsRequest +from .types.compute import ListTargetSslProxiesRequest +from .types.compute import ListTargetTcpProxiesRequest +from .types.compute import ListTargetVpnGatewaysRequest +from .types.compute import ListUrlMapsRequest +from .types.compute import ListUsableSubnetworksRequest +from .types.compute import ListVpnGatewaysRequest +from .types.compute import ListVpnTunnelsRequest +from .types.compute import ListXpnHostsProjectsRequest +from .types.compute import ListZoneOperationsRequest +from .types.compute import ListZonesRequest +from .types.compute import LocalDisk +from .types.compute import LocationPolicy +from .types.compute import LocationPolicyLocation +from .types.compute import LogConfig +from .types.compute import LogConfigCloudAuditOptions +from .types.compute import LogConfigCounterOptions +from .types.compute import LogConfigCounterOptionsCustomField +from .types.compute import LogConfigDataAccessOptions +from .types.compute import MachineType +from .types.compute import MachineTypeAggregatedList +from .types.compute import MachineTypeList +from .types.compute import MachineTypesScopedList +from .types.compute import ManagedInstance +from .types.compute import ManagedInstanceInstanceHealth +from .types.compute import ManagedInstanceLastAttempt +from .types.compute import ManagedInstanceVersion +from .types.compute import Metadata +from .types.compute import MetadataFilter +from .types.compute import MetadataFilterLabelMatch +from .types.compute import MoveDiskProjectRequest +from .types.compute import MoveFirewallPolicyRequest +from .types.compute import MoveInstanceProjectRequest +from .types.compute import NamedPort +from 
.types.compute import Network +from .types.compute import NetworkEndpoint +from .types.compute import NetworkEndpointGroup +from .types.compute import NetworkEndpointGroupAggregatedList +from .types.compute import NetworkEndpointGroupAppEngine +from .types.compute import NetworkEndpointGroupCloudFunction +from .types.compute import NetworkEndpointGroupCloudRun +from .types.compute import NetworkEndpointGroupList +from .types.compute import NetworkEndpointGroupsAttachEndpointsRequest +from .types.compute import NetworkEndpointGroupsDetachEndpointsRequest +from .types.compute import NetworkEndpointGroupsListEndpointsRequest +from .types.compute import NetworkEndpointGroupsListNetworkEndpoints +from .types.compute import NetworkEndpointGroupsScopedList +from .types.compute import NetworkEndpointWithHealthStatus +from .types.compute import NetworkInterface +from .types.compute import NetworkList +from .types.compute import NetworkPeering +from .types.compute import NetworkRoutingConfig +from .types.compute import NetworksAddPeeringRequest +from .types.compute import NetworksGetEffectiveFirewallsResponse +from .types.compute import NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy +from .types.compute import NetworksRemovePeeringRequest +from .types.compute import NetworksUpdatePeeringRequest +from .types.compute import NodeGroup +from .types.compute import NodeGroupAggregatedList +from .types.compute import NodeGroupAutoscalingPolicy +from .types.compute import NodeGroupList +from .types.compute import NodeGroupMaintenanceWindow +from .types.compute import NodeGroupNode +from .types.compute import NodeGroupsAddNodesRequest +from .types.compute import NodeGroupsDeleteNodesRequest +from .types.compute import NodeGroupsListNodes +from .types.compute import NodeGroupsScopedList +from .types.compute import NodeGroupsSetNodeTemplateRequest +from .types.compute import NodeTemplate +from .types.compute import NodeTemplateAggregatedList +from .types.compute import 
NodeTemplateList +from .types.compute import NodeTemplateNodeTypeFlexibility +from .types.compute import NodeTemplatesScopedList +from .types.compute import NodeType +from .types.compute import NodeTypeAggregatedList +from .types.compute import NodeTypeList +from .types.compute import NodeTypesScopedList +from .types.compute import NotificationEndpoint +from .types.compute import NotificationEndpointGrpcSettings +from .types.compute import NotificationEndpointList +from .types.compute import Operation +from .types.compute import OperationAggregatedList +from .types.compute import OperationList +from .types.compute import OperationsScopedList +from .types.compute import OutlierDetection +from .types.compute import PacketMirroring +from .types.compute import PacketMirroringAggregatedList +from .types.compute import PacketMirroringFilter +from .types.compute import PacketMirroringForwardingRuleInfo +from .types.compute import PacketMirroringList +from .types.compute import PacketMirroringMirroredResourceInfo +from .types.compute import PacketMirroringMirroredResourceInfoInstanceInfo +from .types.compute import PacketMirroringMirroredResourceInfoSubnetInfo +from .types.compute import PacketMirroringNetworkInfo +from .types.compute import PacketMirroringsScopedList +from .types.compute import PatchAutoscalerRequest +from .types.compute import PatchBackendBucketRequest +from .types.compute import PatchBackendServiceRequest +from .types.compute import PatchFirewallPolicyRequest +from .types.compute import PatchFirewallRequest +from .types.compute import PatchForwardingRuleRequest +from .types.compute import PatchGlobalForwardingRuleRequest +from .types.compute import PatchGlobalPublicDelegatedPrefixeRequest +from .types.compute import PatchHealthCheckRequest +from .types.compute import PatchImageRequest +from .types.compute import PatchInstanceGroupManagerRequest +from .types.compute import PatchInterconnectAttachmentRequest +from .types.compute import 
PatchInterconnectRequest +from .types.compute import PatchNetworkRequest +from .types.compute import PatchNodeGroupRequest +from .types.compute import PatchPacketMirroringRequest +from .types.compute import PatchPerInstanceConfigsInstanceGroupManagerRequest +from .types.compute import PatchPerInstanceConfigsRegionInstanceGroupManagerRequest +from .types.compute import PatchPublicAdvertisedPrefixeRequest +from .types.compute import PatchPublicDelegatedPrefixeRequest +from .types.compute import PatchRegionAutoscalerRequest +from .types.compute import PatchRegionBackendServiceRequest +from .types.compute import PatchRegionHealthCheckRequest +from .types.compute import PatchRegionHealthCheckServiceRequest +from .types.compute import PatchRegionInstanceGroupManagerRequest +from .types.compute import PatchRegionUrlMapRequest +from .types.compute import PatchRouterRequest +from .types.compute import PatchRuleFirewallPolicyRequest +from .types.compute import PatchRuleSecurityPolicyRequest +from .types.compute import PatchSecurityPolicyRequest +from .types.compute import PatchServiceAttachmentRequest +from .types.compute import PatchSslPolicyRequest +from .types.compute import PatchSubnetworkRequest +from .types.compute import PatchTargetGrpcProxyRequest +from .types.compute import PatchTargetHttpProxyRequest +from .types.compute import PatchTargetHttpsProxyRequest +from .types.compute import PatchUrlMapRequest +from .types.compute import PathMatcher +from .types.compute import PathRule +from .types.compute import PerInstanceConfig +from .types.compute import Policy +from .types.compute import PreconfiguredWafSet +from .types.compute import PreservedState +from .types.compute import PreservedStatePreservedDisk +from .types.compute import PreviewRouterRequest +from .types.compute import Project +from .types.compute import ProjectsDisableXpnResourceRequest +from .types.compute import ProjectsEnableXpnResourceRequest +from .types.compute import ProjectsGetXpnResources +from 
.types.compute import ProjectsListXpnHostsRequest +from .types.compute import ProjectsSetDefaultNetworkTierRequest +from .types.compute import PublicAdvertisedPrefix +from .types.compute import PublicAdvertisedPrefixList +from .types.compute import PublicAdvertisedPrefixPublicDelegatedPrefix +from .types.compute import PublicDelegatedPrefix +from .types.compute import PublicDelegatedPrefixAggregatedList +from .types.compute import PublicDelegatedPrefixesScopedList +from .types.compute import PublicDelegatedPrefixList +from .types.compute import PublicDelegatedPrefixPublicDelegatedSubPrefix +from .types.compute import Quota +from .types.compute import RawDisk +from .types.compute import RecreateInstancesInstanceGroupManagerRequest +from .types.compute import RecreateInstancesRegionInstanceGroupManagerRequest +from .types.compute import Reference +from .types.compute import Region +from .types.compute import RegionAutoscalerList +from .types.compute import RegionDisksAddResourcePoliciesRequest +from .types.compute import RegionDisksRemoveResourcePoliciesRequest +from .types.compute import RegionDisksResizeRequest +from .types.compute import RegionDiskTypeList +from .types.compute import RegionInstanceGroupList +from .types.compute import RegionInstanceGroupManagerDeleteInstanceConfigReq +from .types.compute import RegionInstanceGroupManagerList +from .types.compute import RegionInstanceGroupManagerPatchInstanceConfigReq +from .types.compute import RegionInstanceGroupManagersAbandonInstancesRequest +from .types.compute import RegionInstanceGroupManagersApplyUpdatesRequest +from .types.compute import RegionInstanceGroupManagersCreateInstancesRequest +from .types.compute import RegionInstanceGroupManagersDeleteInstancesRequest +from .types.compute import RegionInstanceGroupManagersListErrorsResponse +from .types.compute import RegionInstanceGroupManagersListInstanceConfigsResp +from .types.compute import RegionInstanceGroupManagersListInstancesResponse +from 
.types.compute import RegionInstanceGroupManagersRecreateRequest +from .types.compute import RegionInstanceGroupManagersSetTargetPoolsRequest +from .types.compute import RegionInstanceGroupManagersSetTemplateRequest +from .types.compute import RegionInstanceGroupManagerUpdateInstanceConfigReq +from .types.compute import RegionInstanceGroupsListInstances +from .types.compute import RegionInstanceGroupsListInstancesRequest +from .types.compute import RegionInstanceGroupsSetNamedPortsRequest +from .types.compute import RegionList +from .types.compute import RegionSetLabelsRequest +from .types.compute import RegionSetPolicyRequest +from .types.compute import RegionTargetHttpsProxiesSetSslCertificatesRequest +from .types.compute import RegionUrlMapsValidateRequest +from .types.compute import RemoveAssociationFirewallPolicyRequest +from .types.compute import RemoveHealthCheckTargetPoolRequest +from .types.compute import RemoveInstancesInstanceGroupRequest +from .types.compute import RemoveInstanceTargetPoolRequest +from .types.compute import RemovePeeringNetworkRequest +from .types.compute import RemoveResourcePoliciesDiskRequest +from .types.compute import RemoveResourcePoliciesInstanceRequest +from .types.compute import RemoveResourcePoliciesRegionDiskRequest +from .types.compute import RemoveRuleFirewallPolicyRequest +from .types.compute import RemoveRuleSecurityPolicyRequest +from .types.compute import RequestMirrorPolicy +from .types.compute import Reservation +from .types.compute import ReservationAffinity +from .types.compute import ReservationAggregatedList +from .types.compute import ReservationList +from .types.compute import ReservationsResizeRequest +from .types.compute import ReservationsScopedList +from .types.compute import ResetInstanceRequest +from .types.compute import ResizeDiskRequest +from .types.compute import ResizeInstanceGroupManagerRequest +from .types.compute import ResizeRegionDiskRequest +from .types.compute import 
ResizeRegionInstanceGroupManagerRequest +from .types.compute import ResizeReservationRequest +from .types.compute import ResourceCommitment +from .types.compute import ResourceGroupReference +from .types.compute import ResourcePoliciesScopedList +from .types.compute import ResourcePolicy +from .types.compute import ResourcePolicyAggregatedList +from .types.compute import ResourcePolicyDailyCycle +from .types.compute import ResourcePolicyGroupPlacementPolicy +from .types.compute import ResourcePolicyHourlyCycle +from .types.compute import ResourcePolicyInstanceSchedulePolicy +from .types.compute import ResourcePolicyInstanceSchedulePolicySchedule +from .types.compute import ResourcePolicyList +from .types.compute import ResourcePolicyResourceStatus +from .types.compute import ResourcePolicyResourceStatusInstanceSchedulePolicyStatus +from .types.compute import ResourcePolicySnapshotSchedulePolicy +from .types.compute import ResourcePolicySnapshotSchedulePolicyRetentionPolicy +from .types.compute import ResourcePolicySnapshotSchedulePolicySchedule +from .types.compute import ResourcePolicySnapshotSchedulePolicySnapshotProperties +from .types.compute import ResourcePolicyWeeklyCycle +from .types.compute import ResourcePolicyWeeklyCycleDayOfWeek +from .types.compute import Route +from .types.compute import RouteAsPath +from .types.compute import RouteList +from .types.compute import Router +from .types.compute import RouterAdvertisedIpRange +from .types.compute import RouterAggregatedList +from .types.compute import RouterBgp +from .types.compute import RouterBgpPeer +from .types.compute import RouterBgpPeerBfd +from .types.compute import RouterInterface +from .types.compute import RouterList +from .types.compute import RouterNat +from .types.compute import RouterNatLogConfig +from .types.compute import RouterNatRule +from .types.compute import RouterNatRuleAction +from .types.compute import RouterNatSubnetworkToNat +from .types.compute import RoutersPreviewResponse 
+from .types.compute import RoutersScopedList +from .types.compute import RouterStatus +from .types.compute import RouterStatusBgpPeerStatus +from .types.compute import RouterStatusNatStatus +from .types.compute import RouterStatusNatStatusNatRuleStatus +from .types.compute import RouterStatusResponse +from .types.compute import Rule +from .types.compute import ScalingScheduleStatus +from .types.compute import Scheduling +from .types.compute import SchedulingNodeAffinity +from .types.compute import ScratchDisks +from .types.compute import Screenshot +from .types.compute import SecurityPoliciesListPreconfiguredExpressionSetsResponse +from .types.compute import SecurityPoliciesWafConfig +from .types.compute import SecurityPolicy +from .types.compute import SecurityPolicyAdaptiveProtectionConfig +from .types.compute import SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig +from .types.compute import SecurityPolicyAdvancedOptionsConfig +from .types.compute import SecurityPolicyList +from .types.compute import SecurityPolicyReference +from .types.compute import SecurityPolicyRule +from .types.compute import SecurityPolicyRuleMatcher +from .types.compute import SecurityPolicyRuleMatcherConfig +from .types.compute import SecuritySettings +from .types.compute import SendDiagnosticInterruptInstanceRequest +from .types.compute import SendDiagnosticInterruptInstanceResponse +from .types.compute import SerialPortOutput +from .types.compute import ServerBinding +from .types.compute import ServiceAccount +from .types.compute import ServiceAttachment +from .types.compute import ServiceAttachmentAggregatedList +from .types.compute import ServiceAttachmentConnectedEndpoint +from .types.compute import ServiceAttachmentConsumerProjectLimit +from .types.compute import ServiceAttachmentList +from .types.compute import ServiceAttachmentsScopedList +from .types.compute import SetBackendServiceTargetSslProxyRequest +from .types.compute import 
SetBackendServiceTargetTcpProxyRequest +from .types.compute import SetBackupTargetPoolRequest +from .types.compute import SetCommonInstanceMetadataProjectRequest +from .types.compute import SetDefaultNetworkTierProjectRequest +from .types.compute import SetDeletionProtectionInstanceRequest +from .types.compute import SetDiskAutoDeleteInstanceRequest +from .types.compute import SetIamPolicyDiskRequest +from .types.compute import SetIamPolicyFirewallPolicyRequest +from .types.compute import SetIamPolicyImageRequest +from .types.compute import SetIamPolicyInstanceRequest +from .types.compute import SetIamPolicyInstanceTemplateRequest +from .types.compute import SetIamPolicyLicenseRequest +from .types.compute import SetIamPolicyNodeGroupRequest +from .types.compute import SetIamPolicyNodeTemplateRequest +from .types.compute import SetIamPolicyRegionDiskRequest +from .types.compute import SetIamPolicyReservationRequest +from .types.compute import SetIamPolicyResourcePolicyRequest +from .types.compute import SetIamPolicyServiceAttachmentRequest +from .types.compute import SetIamPolicySnapshotRequest +from .types.compute import SetIamPolicySubnetworkRequest +from .types.compute import SetInstanceTemplateInstanceGroupManagerRequest +from .types.compute import SetInstanceTemplateRegionInstanceGroupManagerRequest +from .types.compute import SetLabelsDiskRequest +from .types.compute import SetLabelsExternalVpnGatewayRequest +from .types.compute import SetLabelsForwardingRuleRequest +from .types.compute import SetLabelsGlobalForwardingRuleRequest +from .types.compute import SetLabelsImageRequest +from .types.compute import SetLabelsInstanceRequest +from .types.compute import SetLabelsRegionDiskRequest +from .types.compute import SetLabelsSnapshotRequest +from .types.compute import SetLabelsVpnGatewayRequest +from .types.compute import SetMachineResourcesInstanceRequest +from .types.compute import SetMachineTypeInstanceRequest +from .types.compute import 
SetMetadataInstanceRequest +from .types.compute import SetMinCpuPlatformInstanceRequest +from .types.compute import SetNamedPortsInstanceGroupRequest +from .types.compute import SetNamedPortsRegionInstanceGroupRequest +from .types.compute import SetNodeTemplateNodeGroupRequest +from .types.compute import SetPrivateIpGoogleAccessSubnetworkRequest +from .types.compute import SetProxyHeaderTargetSslProxyRequest +from .types.compute import SetProxyHeaderTargetTcpProxyRequest +from .types.compute import SetQuicOverrideTargetHttpsProxyRequest +from .types.compute import SetSchedulingInstanceRequest +from .types.compute import SetSecurityPolicyBackendServiceRequest +from .types.compute import SetServiceAccountInstanceRequest +from .types.compute import SetShieldedInstanceIntegrityPolicyInstanceRequest +from .types.compute import SetSslCertificatesRegionTargetHttpsProxyRequest +from .types.compute import SetSslCertificatesTargetHttpsProxyRequest +from .types.compute import SetSslCertificatesTargetSslProxyRequest +from .types.compute import SetSslPolicyTargetHttpsProxyRequest +from .types.compute import SetSslPolicyTargetSslProxyRequest +from .types.compute import SetTagsInstanceRequest +from .types.compute import SetTargetForwardingRuleRequest +from .types.compute import SetTargetGlobalForwardingRuleRequest +from .types.compute import SetTargetPoolsInstanceGroupManagerRequest +from .types.compute import SetTargetPoolsRegionInstanceGroupManagerRequest +from .types.compute import SetUrlMapRegionTargetHttpProxyRequest +from .types.compute import SetUrlMapRegionTargetHttpsProxyRequest +from .types.compute import SetUrlMapTargetHttpProxyRequest +from .types.compute import SetUrlMapTargetHttpsProxyRequest +from .types.compute import SetUsageExportBucketProjectRequest +from .types.compute import ShieldedInstanceConfig +from .types.compute import ShieldedInstanceIdentity +from .types.compute import ShieldedInstanceIdentityEntry +from .types.compute import 
ShieldedInstanceIntegrityPolicy +from .types.compute import SignedUrlKey +from .types.compute import SimulateMaintenanceEventInstanceRequest +from .types.compute import Snapshot +from .types.compute import SnapshotList +from .types.compute import SourceInstanceParams +from .types.compute import SslCertificate +from .types.compute import SslCertificateAggregatedList +from .types.compute import SslCertificateList +from .types.compute import SslCertificateManagedSslCertificate +from .types.compute import SslCertificateSelfManagedSslCertificate +from .types.compute import SslCertificatesScopedList +from .types.compute import SSLHealthCheck +from .types.compute import SslPoliciesList +from .types.compute import SslPoliciesListAvailableFeaturesResponse +from .types.compute import SslPolicy +from .types.compute import SslPolicyReference +from .types.compute import StartInstanceRequest +from .types.compute import StartWithEncryptionKeyInstanceRequest +from .types.compute import StatefulPolicy +from .types.compute import StatefulPolicyPreservedState +from .types.compute import StatefulPolicyPreservedStateDiskDevice +from .types.compute import StopInstanceRequest +from .types.compute import Subnetwork +from .types.compute import SubnetworkAggregatedList +from .types.compute import SubnetworkList +from .types.compute import SubnetworkLogConfig +from .types.compute import SubnetworkSecondaryRange +from .types.compute import SubnetworksExpandIpCidrRangeRequest +from .types.compute import SubnetworksScopedList +from .types.compute import SubnetworksSetPrivateIpGoogleAccessRequest +from .types.compute import Subsetting +from .types.compute import SwitchToCustomModeNetworkRequest +from .types.compute import Tags +from .types.compute import TargetGrpcProxy +from .types.compute import TargetGrpcProxyList +from .types.compute import TargetHttpProxiesScopedList +from .types.compute import TargetHttpProxy +from .types.compute import TargetHttpProxyAggregatedList +from .types.compute 
import TargetHttpProxyList +from .types.compute import TargetHttpsProxiesScopedList +from .types.compute import TargetHttpsProxiesSetQuicOverrideRequest +from .types.compute import TargetHttpsProxiesSetSslCertificatesRequest +from .types.compute import TargetHttpsProxy +from .types.compute import TargetHttpsProxyAggregatedList +from .types.compute import TargetHttpsProxyList +from .types.compute import TargetInstance +from .types.compute import TargetInstanceAggregatedList +from .types.compute import TargetInstanceList +from .types.compute import TargetInstancesScopedList +from .types.compute import TargetPool +from .types.compute import TargetPoolAggregatedList +from .types.compute import TargetPoolInstanceHealth +from .types.compute import TargetPoolList +from .types.compute import TargetPoolsAddHealthCheckRequest +from .types.compute import TargetPoolsAddInstanceRequest +from .types.compute import TargetPoolsRemoveHealthCheckRequest +from .types.compute import TargetPoolsRemoveInstanceRequest +from .types.compute import TargetPoolsScopedList +from .types.compute import TargetReference +from .types.compute import TargetSslProxiesSetBackendServiceRequest +from .types.compute import TargetSslProxiesSetProxyHeaderRequest +from .types.compute import TargetSslProxiesSetSslCertificatesRequest +from .types.compute import TargetSslProxy +from .types.compute import TargetSslProxyList +from .types.compute import TargetTcpProxiesSetBackendServiceRequest +from .types.compute import TargetTcpProxiesSetProxyHeaderRequest +from .types.compute import TargetTcpProxy +from .types.compute import TargetTcpProxyList +from .types.compute import TargetVpnGateway +from .types.compute import TargetVpnGatewayAggregatedList +from .types.compute import TargetVpnGatewayList +from .types.compute import TargetVpnGatewaysScopedList +from .types.compute import TCPHealthCheck +from .types.compute import TestFailure +from .types.compute import TestIamPermissionsDiskRequest +from .types.compute 
import TestIamPermissionsExternalVpnGatewayRequest +from .types.compute import TestIamPermissionsFirewallPolicyRequest +from .types.compute import TestIamPermissionsImageRequest +from .types.compute import TestIamPermissionsInstanceRequest +from .types.compute import TestIamPermissionsInstanceTemplateRequest +from .types.compute import TestIamPermissionsLicenseCodeRequest +from .types.compute import TestIamPermissionsLicenseRequest +from .types.compute import TestIamPermissionsNetworkEndpointGroupRequest +from .types.compute import TestIamPermissionsNodeGroupRequest +from .types.compute import TestIamPermissionsNodeTemplateRequest +from .types.compute import TestIamPermissionsPacketMirroringRequest +from .types.compute import TestIamPermissionsRegionDiskRequest +from .types.compute import TestIamPermissionsReservationRequest +from .types.compute import TestIamPermissionsResourcePolicyRequest +from .types.compute import TestIamPermissionsServiceAttachmentRequest +from .types.compute import TestIamPermissionsSnapshotRequest +from .types.compute import TestIamPermissionsSubnetworkRequest +from .types.compute import TestIamPermissionsVpnGatewayRequest +from .types.compute import TestPermissionsRequest +from .types.compute import TestPermissionsResponse +from .types.compute import Uint128 +from .types.compute import UpdateAccessConfigInstanceRequest +from .types.compute import UpdateAutoscalerRequest +from .types.compute import UpdateBackendBucketRequest +from .types.compute import UpdateBackendServiceRequest +from .types.compute import UpdateDisplayDeviceInstanceRequest +from .types.compute import UpdateFirewallRequest +from .types.compute import UpdateHealthCheckRequest +from .types.compute import UpdateInstanceRequest +from .types.compute import UpdateNetworkInterfaceInstanceRequest +from .types.compute import UpdatePeeringNetworkRequest +from .types.compute import UpdatePerInstanceConfigsInstanceGroupManagerRequest +from .types.compute import 
UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest +from .types.compute import UpdateRegionAutoscalerRequest +from .types.compute import UpdateRegionBackendServiceRequest +from .types.compute import UpdateRegionHealthCheckRequest +from .types.compute import UpdateRegionUrlMapRequest +from .types.compute import UpdateRouterRequest +from .types.compute import UpdateShieldedInstanceConfigInstanceRequest +from .types.compute import UpdateUrlMapRequest +from .types.compute import UrlMap +from .types.compute import UrlMapList +from .types.compute import UrlMapReference +from .types.compute import UrlMapsAggregatedList +from .types.compute import UrlMapsScopedList +from .types.compute import UrlMapsValidateRequest +from .types.compute import UrlMapsValidateResponse +from .types.compute import UrlMapTest +from .types.compute import UrlMapTestHeader +from .types.compute import UrlMapValidationResult +from .types.compute import UrlRewrite +from .types.compute import UsableSubnetwork +from .types.compute import UsableSubnetworksAggregatedList +from .types.compute import UsableSubnetworkSecondaryRange +from .types.compute import UsageExportLocation +from .types.compute import ValidateRegionUrlMapRequest +from .types.compute import ValidateUrlMapRequest +from .types.compute import VmEndpointNatMappings +from .types.compute import VmEndpointNatMappingsInterfaceNatMappings +from .types.compute import VmEndpointNatMappingsList +from .types.compute import VpnGateway +from .types.compute import VpnGatewayAggregatedList +from .types.compute import VpnGatewayList +from .types.compute import VpnGatewaysGetStatusResponse +from .types.compute import VpnGatewaysScopedList +from .types.compute import VpnGatewayStatus +from .types.compute import VpnGatewayStatusHighAvailabilityRequirementState +from .types.compute import VpnGatewayStatusTunnel +from .types.compute import VpnGatewayStatusVpnConnection +from .types.compute import VpnGatewayVpnGatewayInterface +from .types.compute 
import VpnTunnel +from .types.compute import VpnTunnelAggregatedList +from .types.compute import VpnTunnelList +from .types.compute import VpnTunnelsScopedList +from .types.compute import WafExpressionSet +from .types.compute import WafExpressionSetExpression +from .types.compute import WaitGlobalOperationRequest +from .types.compute import WaitRegionOperationRequest +from .types.compute import WaitZoneOperationRequest +from .types.compute import Warning +from .types.compute import Warnings +from .types.compute import WeightedBackendService +from .types.compute import XpnHostList +from .types.compute import XpnResourceId +from .types.compute import Zone +from .types.compute import ZoneList +from .types.compute import ZoneSetLabelsRequest +from .types.compute import ZoneSetPolicyRequest + +__all__ = ( +'AbandonInstancesInstanceGroupManagerRequest', +'AbandonInstancesRegionInstanceGroupManagerRequest', +'AcceleratorConfig', +'AcceleratorType', +'AcceleratorTypeAggregatedList', +'AcceleratorTypeList', +'AcceleratorTypesClient', +'AcceleratorTypesScopedList', +'Accelerators', +'AccessConfig', +'AddAccessConfigInstanceRequest', +'AddAssociationFirewallPolicyRequest', +'AddHealthCheckTargetPoolRequest', +'AddInstanceTargetPoolRequest', +'AddInstancesInstanceGroupRequest', +'AddNodesNodeGroupRequest', +'AddPeeringNetworkRequest', +'AddResourcePoliciesDiskRequest', +'AddResourcePoliciesInstanceRequest', +'AddResourcePoliciesRegionDiskRequest', +'AddRuleFirewallPolicyRequest', +'AddRuleSecurityPolicyRequest', +'AddSignedUrlKeyBackendBucketRequest', +'AddSignedUrlKeyBackendServiceRequest', +'Address', +'AddressAggregatedList', +'AddressList', +'AddressesClient', +'AddressesScopedList', +'AdvancedMachineFeatures', +'AggregatedListAcceleratorTypesRequest', +'AggregatedListAddressesRequest', +'AggregatedListAutoscalersRequest', +'AggregatedListBackendServicesRequest', +'AggregatedListDiskTypesRequest', +'AggregatedListDisksRequest', +'AggregatedListForwardingRulesRequest', 
+'AggregatedListGlobalOperationsRequest', +'AggregatedListHealthChecksRequest', +'AggregatedListInstanceGroupManagersRequest', +'AggregatedListInstanceGroupsRequest', +'AggregatedListInstancesRequest', +'AggregatedListInterconnectAttachmentsRequest', +'AggregatedListMachineTypesRequest', +'AggregatedListNetworkEndpointGroupsRequest', +'AggregatedListNodeGroupsRequest', +'AggregatedListNodeTemplatesRequest', +'AggregatedListNodeTypesRequest', +'AggregatedListPacketMirroringsRequest', +'AggregatedListPublicDelegatedPrefixesRequest', +'AggregatedListRegionCommitmentsRequest', +'AggregatedListReservationsRequest', +'AggregatedListResourcePoliciesRequest', +'AggregatedListRoutersRequest', +'AggregatedListServiceAttachmentsRequest', +'AggregatedListSslCertificatesRequest', +'AggregatedListSubnetworksRequest', +'AggregatedListTargetHttpProxiesRequest', +'AggregatedListTargetHttpsProxiesRequest', +'AggregatedListTargetInstancesRequest', +'AggregatedListTargetPoolsRequest', +'AggregatedListTargetVpnGatewaysRequest', +'AggregatedListUrlMapsRequest', +'AggregatedListVpnGatewaysRequest', +'AggregatedListVpnTunnelsRequest', +'AliasIpRange', +'AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk', +'AllocationSpecificSKUAllocationReservedInstanceProperties', +'AllocationSpecificSKUReservation', +'Allowed', +'ApplyUpdatesToInstancesInstanceGroupManagerRequest', +'ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest', +'AttachDiskInstanceRequest', +'AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest', +'AttachNetworkEndpointsNetworkEndpointGroupRequest', +'AttachedDisk', +'AttachedDiskInitializeParams', +'AuditConfig', +'AuditLogConfig', +'AuthorizationLoggingOptions', +'Autoscaler', +'AutoscalerAggregatedList', +'AutoscalerList', +'AutoscalerStatusDetails', +'AutoscalersClient', +'AutoscalersScopedList', +'AutoscalingPolicy', +'AutoscalingPolicyCpuUtilization', +'AutoscalingPolicyCustomMetricUtilization', +'AutoscalingPolicyLoadBalancingUtilization', 
+'AutoscalingPolicyScaleInControl', +'AutoscalingPolicyScalingSchedule', +'Backend', +'BackendBucket', +'BackendBucketCdnPolicy', +'BackendBucketCdnPolicyBypassCacheOnRequestHeader', +'BackendBucketCdnPolicyNegativeCachingPolicy', +'BackendBucketList', +'BackendBucketsClient', +'BackendService', +'BackendServiceAggregatedList', +'BackendServiceCdnPolicy', +'BackendServiceCdnPolicyBypassCacheOnRequestHeader', +'BackendServiceCdnPolicyNegativeCachingPolicy', +'BackendServiceFailoverPolicy', +'BackendServiceGroupHealth', +'BackendServiceIAP', +'BackendServiceList', +'BackendServiceLogConfig', +'BackendServiceReference', +'BackendServicesClient', +'BackendServicesScopedList', +'Binding', +'BulkInsertInstanceRequest', +'BulkInsertInstanceResource', +'BulkInsertInstanceResourcePerInstanceProperties', +'BulkInsertRegionInstanceRequest', +'CacheInvalidationRule', +'CacheKeyPolicy', +'CircuitBreakers', +'CloneRulesFirewallPolicyRequest', +'Commitment', +'CommitmentAggregatedList', +'CommitmentList', +'CommitmentsScopedList', +'Condition', +'ConfidentialInstanceConfig', +'ConnectionDraining', +'ConsistentHashLoadBalancerSettings', +'ConsistentHashLoadBalancerSettingsHttpCookie', +'CorsPolicy', +'CreateInstancesInstanceGroupManagerRequest', +'CreateInstancesRegionInstanceGroupManagerRequest', +'CreateSnapshotDiskRequest', +'CreateSnapshotRegionDiskRequest', +'CustomerEncryptionKey', +'CustomerEncryptionKeyProtectedDisk', +'Data', +'DeleteAccessConfigInstanceRequest', +'DeleteAddressRequest', +'DeleteAutoscalerRequest', +'DeleteBackendBucketRequest', +'DeleteBackendServiceRequest', +'DeleteDiskRequest', +'DeleteExternalVpnGatewayRequest', +'DeleteFirewallPolicyRequest', +'DeleteFirewallRequest', +'DeleteForwardingRuleRequest', +'DeleteGlobalAddressRequest', +'DeleteGlobalForwardingRuleRequest', +'DeleteGlobalNetworkEndpointGroupRequest', +'DeleteGlobalOperationRequest', +'DeleteGlobalOperationResponse', +'DeleteGlobalOrganizationOperationRequest', 
+'DeleteGlobalOrganizationOperationResponse', +'DeleteGlobalPublicDelegatedPrefixeRequest', +'DeleteHealthCheckRequest', +'DeleteImageRequest', +'DeleteInstanceGroupManagerRequest', +'DeleteInstanceGroupRequest', +'DeleteInstanceRequest', +'DeleteInstanceTemplateRequest', +'DeleteInstancesInstanceGroupManagerRequest', +'DeleteInstancesRegionInstanceGroupManagerRequest', +'DeleteInterconnectAttachmentRequest', +'DeleteInterconnectRequest', +'DeleteLicenseRequest', +'DeleteNetworkEndpointGroupRequest', +'DeleteNetworkRequest', +'DeleteNodeGroupRequest', +'DeleteNodeTemplateRequest', +'DeleteNodesNodeGroupRequest', +'DeletePacketMirroringRequest', +'DeletePerInstanceConfigsInstanceGroupManagerRequest', +'DeletePerInstanceConfigsRegionInstanceGroupManagerRequest', +'DeletePublicAdvertisedPrefixeRequest', +'DeletePublicDelegatedPrefixeRequest', +'DeleteRegionAutoscalerRequest', +'DeleteRegionBackendServiceRequest', +'DeleteRegionDiskRequest', +'DeleteRegionHealthCheckRequest', +'DeleteRegionHealthCheckServiceRequest', +'DeleteRegionInstanceGroupManagerRequest', +'DeleteRegionNetworkEndpointGroupRequest', +'DeleteRegionNotificationEndpointRequest', +'DeleteRegionOperationRequest', +'DeleteRegionOperationResponse', +'DeleteRegionSslCertificateRequest', +'DeleteRegionTargetHttpProxyRequest', +'DeleteRegionTargetHttpsProxyRequest', +'DeleteRegionUrlMapRequest', +'DeleteReservationRequest', +'DeleteResourcePolicyRequest', +'DeleteRouteRequest', +'DeleteRouterRequest', +'DeleteSecurityPolicyRequest', +'DeleteServiceAttachmentRequest', +'DeleteSignedUrlKeyBackendBucketRequest', +'DeleteSignedUrlKeyBackendServiceRequest', +'DeleteSnapshotRequest', +'DeleteSslCertificateRequest', +'DeleteSslPolicyRequest', +'DeleteSubnetworkRequest', +'DeleteTargetGrpcProxyRequest', +'DeleteTargetHttpProxyRequest', +'DeleteTargetHttpsProxyRequest', +'DeleteTargetInstanceRequest', +'DeleteTargetPoolRequest', +'DeleteTargetSslProxyRequest', +'DeleteTargetTcpProxyRequest', 
+'DeleteTargetVpnGatewayRequest', +'DeleteUrlMapRequest', +'DeleteVpnGatewayRequest', +'DeleteVpnTunnelRequest', +'DeleteZoneOperationRequest', +'DeleteZoneOperationResponse', +'Denied', +'DeprecateImageRequest', +'DeprecationStatus', +'DetachDiskInstanceRequest', +'DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest', +'DetachNetworkEndpointsNetworkEndpointGroupRequest', +'DisableXpnHostProjectRequest', +'DisableXpnResourceProjectRequest', +'Disk', +'DiskAggregatedList', +'DiskInstantiationConfig', +'DiskList', +'DiskMoveRequest', +'DiskType', +'DiskTypeAggregatedList', +'DiskTypeList', +'DiskTypesClient', +'DiskTypesScopedList', +'DisksAddResourcePoliciesRequest', +'DisksClient', +'DisksRemoveResourcePoliciesRequest', +'DisksResizeRequest', +'DisksScopedList', +'DisplayDevice', +'DistributionPolicy', +'DistributionPolicyZoneConfiguration', +'Duration', +'EnableXpnHostProjectRequest', +'EnableXpnResourceProjectRequest', +'Error', +'Errors', +'ExchangedPeeringRoute', +'ExchangedPeeringRoutesList', +'ExpandIpCidrRangeSubnetworkRequest', +'Expr', +'ExternalVpnGateway', +'ExternalVpnGatewayInterface', +'ExternalVpnGatewayList', +'ExternalVpnGatewaysClient', +'FileContentBuffer', +'Firewall', +'FirewallList', +'FirewallLogConfig', +'FirewallPoliciesClient', +'FirewallPoliciesListAssociationsResponse', +'FirewallPolicy', +'FirewallPolicyAssociation', +'FirewallPolicyList', +'FirewallPolicyRule', +'FirewallPolicyRuleMatcher', +'FirewallPolicyRuleMatcherLayer4Config', +'FirewallsClient', +'FixedOrPercent', +'ForwardingRule', +'ForwardingRuleAggregatedList', +'ForwardingRuleList', +'ForwardingRuleReference', +'ForwardingRuleServiceDirectoryRegistration', +'ForwardingRulesClient', +'ForwardingRulesScopedList', +'GRPCHealthCheck', +'GetAcceleratorTypeRequest', +'GetAddressRequest', +'GetAssociationFirewallPolicyRequest', +'GetAutoscalerRequest', +'GetBackendBucketRequest', +'GetBackendServiceRequest', +'GetDiagnosticsInterconnectRequest', +'GetDiskRequest', 
+'GetDiskTypeRequest', +'GetEffectiveFirewallsInstanceRequest', +'GetEffectiveFirewallsNetworkRequest', +'GetExternalVpnGatewayRequest', +'GetFirewallPolicyRequest', +'GetFirewallRequest', +'GetForwardingRuleRequest', +'GetFromFamilyImageRequest', +'GetGlobalAddressRequest', +'GetGlobalForwardingRuleRequest', +'GetGlobalNetworkEndpointGroupRequest', +'GetGlobalOperationRequest', +'GetGlobalOrganizationOperationRequest', +'GetGlobalPublicDelegatedPrefixeRequest', +'GetGuestAttributesInstanceRequest', +'GetHealthBackendServiceRequest', +'GetHealthCheckRequest', +'GetHealthRegionBackendServiceRequest', +'GetHealthTargetPoolRequest', +'GetIamPolicyDiskRequest', +'GetIamPolicyFirewallPolicyRequest', +'GetIamPolicyImageRequest', +'GetIamPolicyInstanceRequest', +'GetIamPolicyInstanceTemplateRequest', +'GetIamPolicyLicenseRequest', +'GetIamPolicyNodeGroupRequest', +'GetIamPolicyNodeTemplateRequest', +'GetIamPolicyRegionDiskRequest', +'GetIamPolicyReservationRequest', +'GetIamPolicyResourcePolicyRequest', +'GetIamPolicyServiceAttachmentRequest', +'GetIamPolicySnapshotRequest', +'GetIamPolicySubnetworkRequest', +'GetImageFamilyViewRequest', +'GetImageRequest', +'GetInstanceGroupManagerRequest', +'GetInstanceGroupRequest', +'GetInstanceRequest', +'GetInstanceTemplateRequest', +'GetInterconnectAttachmentRequest', +'GetInterconnectLocationRequest', +'GetInterconnectRequest', +'GetLicenseCodeRequest', +'GetLicenseRequest', +'GetMachineTypeRequest', +'GetNatMappingInfoRoutersRequest', +'GetNetworkEndpointGroupRequest', +'GetNetworkRequest', +'GetNodeGroupRequest', +'GetNodeTemplateRequest', +'GetNodeTypeRequest', +'GetPacketMirroringRequest', +'GetProjectRequest', +'GetPublicAdvertisedPrefixeRequest', +'GetPublicDelegatedPrefixeRequest', +'GetRegionAutoscalerRequest', +'GetRegionBackendServiceRequest', +'GetRegionCommitmentRequest', +'GetRegionDiskRequest', +'GetRegionDiskTypeRequest', +'GetRegionHealthCheckRequest', +'GetRegionHealthCheckServiceRequest', 
+'GetRegionInstanceGroupManagerRequest', +'GetRegionInstanceGroupRequest', +'GetRegionNetworkEndpointGroupRequest', +'GetRegionNotificationEndpointRequest', +'GetRegionOperationRequest', +'GetRegionRequest', +'GetRegionSslCertificateRequest', +'GetRegionTargetHttpProxyRequest', +'GetRegionTargetHttpsProxyRequest', +'GetRegionUrlMapRequest', +'GetReservationRequest', +'GetResourcePolicyRequest', +'GetRouteRequest', +'GetRouterRequest', +'GetRouterStatusRouterRequest', +'GetRuleFirewallPolicyRequest', +'GetRuleSecurityPolicyRequest', +'GetScreenshotInstanceRequest', +'GetSecurityPolicyRequest', +'GetSerialPortOutputInstanceRequest', +'GetServiceAttachmentRequest', +'GetShieldedInstanceIdentityInstanceRequest', +'GetSnapshotRequest', +'GetSslCertificateRequest', +'GetSslPolicyRequest', +'GetStatusVpnGatewayRequest', +'GetSubnetworkRequest', +'GetTargetGrpcProxyRequest', +'GetTargetHttpProxyRequest', +'GetTargetHttpsProxyRequest', +'GetTargetInstanceRequest', +'GetTargetPoolRequest', +'GetTargetSslProxyRequest', +'GetTargetTcpProxyRequest', +'GetTargetVpnGatewayRequest', +'GetUrlMapRequest', +'GetVpnGatewayRequest', +'GetVpnTunnelRequest', +'GetXpnHostProjectRequest', +'GetXpnResourcesProjectsRequest', +'GetZoneOperationRequest', +'GetZoneRequest', +'GlobalAddressesClient', +'GlobalForwardingRulesClient', +'GlobalNetworkEndpointGroupsAttachEndpointsRequest', +'GlobalNetworkEndpointGroupsClient', +'GlobalNetworkEndpointGroupsDetachEndpointsRequest', +'GlobalOperationsClient', +'GlobalOrganizationOperationsClient', +'GlobalOrganizationSetPolicyRequest', +'GlobalPublicDelegatedPrefixesClient', +'GlobalSetLabelsRequest', +'GlobalSetPolicyRequest', +'GuestAttributes', +'GuestAttributesEntry', +'GuestAttributesValue', +'GuestOsFeature', +'HTTP2HealthCheck', +'HTTPHealthCheck', +'HTTPSHealthCheck', +'HealthCheck', +'HealthCheckList', +'HealthCheckLogConfig', +'HealthCheckReference', +'HealthCheckService', +'HealthCheckServiceReference', +'HealthCheckServicesList', 
+'HealthChecksAggregatedList', +'HealthChecksClient', +'HealthChecksScopedList', +'HealthStatus', +'HealthStatusForNetworkEndpoint', +'HostRule', +'HttpFaultAbort', +'HttpFaultDelay', +'HttpFaultInjection', +'HttpHeaderAction', +'HttpHeaderMatch', +'HttpHeaderOption', +'HttpQueryParameterMatch', +'HttpRedirectAction', +'HttpRetryPolicy', +'HttpRouteAction', +'HttpRouteRule', +'HttpRouteRuleMatch', +'Image', +'ImageFamilyView', +'ImageFamilyViewsClient', +'ImageList', +'ImagesClient', +'InitialStateConfig', +'InsertAddressRequest', +'InsertAutoscalerRequest', +'InsertBackendBucketRequest', +'InsertBackendServiceRequest', +'InsertDiskRequest', +'InsertExternalVpnGatewayRequest', +'InsertFirewallPolicyRequest', +'InsertFirewallRequest', +'InsertForwardingRuleRequest', +'InsertGlobalAddressRequest', +'InsertGlobalForwardingRuleRequest', +'InsertGlobalNetworkEndpointGroupRequest', +'InsertGlobalPublicDelegatedPrefixeRequest', +'InsertHealthCheckRequest', +'InsertImageRequest', +'InsertInstanceGroupManagerRequest', +'InsertInstanceGroupRequest', +'InsertInstanceRequest', +'InsertInstanceTemplateRequest', +'InsertInterconnectAttachmentRequest', +'InsertInterconnectRequest', +'InsertLicenseRequest', +'InsertNetworkEndpointGroupRequest', +'InsertNetworkRequest', +'InsertNodeGroupRequest', +'InsertNodeTemplateRequest', +'InsertPacketMirroringRequest', +'InsertPublicAdvertisedPrefixeRequest', +'InsertPublicDelegatedPrefixeRequest', +'InsertRegionAutoscalerRequest', +'InsertRegionBackendServiceRequest', +'InsertRegionCommitmentRequest', +'InsertRegionDiskRequest', +'InsertRegionHealthCheckRequest', +'InsertRegionHealthCheckServiceRequest', +'InsertRegionInstanceGroupManagerRequest', +'InsertRegionNetworkEndpointGroupRequest', +'InsertRegionNotificationEndpointRequest', +'InsertRegionSslCertificateRequest', +'InsertRegionTargetHttpProxyRequest', +'InsertRegionTargetHttpsProxyRequest', +'InsertRegionUrlMapRequest', +'InsertReservationRequest', +'InsertResourcePolicyRequest', 
+'InsertRouteRequest', +'InsertRouterRequest', +'InsertSecurityPolicyRequest', +'InsertServiceAttachmentRequest', +'InsertSslCertificateRequest', +'InsertSslPolicyRequest', +'InsertSubnetworkRequest', +'InsertTargetGrpcProxyRequest', +'InsertTargetHttpProxyRequest', +'InsertTargetHttpsProxyRequest', +'InsertTargetInstanceRequest', +'InsertTargetPoolRequest', +'InsertTargetSslProxyRequest', +'InsertTargetTcpProxyRequest', +'InsertTargetVpnGatewayRequest', +'InsertUrlMapRequest', +'InsertVpnGatewayRequest', +'InsertVpnTunnelRequest', +'Instance', +'InstanceAggregatedList', +'InstanceGroup', +'InstanceGroupAggregatedList', +'InstanceGroupList', +'InstanceGroupManager', +'InstanceGroupManagerActionsSummary', +'InstanceGroupManagerAggregatedList', +'InstanceGroupManagerAutoHealingPolicy', +'InstanceGroupManagerList', +'InstanceGroupManagerStatus', +'InstanceGroupManagerStatusStateful', +'InstanceGroupManagerStatusStatefulPerInstanceConfigs', +'InstanceGroupManagerStatusVersionTarget', +'InstanceGroupManagerUpdatePolicy', +'InstanceGroupManagerVersion', +'InstanceGroupManagersAbandonInstancesRequest', +'InstanceGroupManagersApplyUpdatesRequest', +'InstanceGroupManagersClient', +'InstanceGroupManagersCreateInstancesRequest', +'InstanceGroupManagersDeleteInstancesRequest', +'InstanceGroupManagersDeletePerInstanceConfigsReq', +'InstanceGroupManagersListErrorsResponse', +'InstanceGroupManagersListManagedInstancesResponse', +'InstanceGroupManagersListPerInstanceConfigsResp', +'InstanceGroupManagersPatchPerInstanceConfigsReq', +'InstanceGroupManagersRecreateInstancesRequest', +'InstanceGroupManagersScopedList', +'InstanceGroupManagersSetInstanceTemplateRequest', +'InstanceGroupManagersSetTargetPoolsRequest', +'InstanceGroupManagersUpdatePerInstanceConfigsReq', +'InstanceGroupsAddInstancesRequest', +'InstanceGroupsClient', +'InstanceGroupsListInstances', +'InstanceGroupsListInstancesRequest', +'InstanceGroupsRemoveInstancesRequest', +'InstanceGroupsScopedList', 
+'InstanceGroupsSetNamedPortsRequest', +'InstanceList', +'InstanceListReferrers', +'InstanceManagedByIgmError', +'InstanceManagedByIgmErrorInstanceActionDetails', +'InstanceManagedByIgmErrorManagedInstanceError', +'InstanceMoveRequest', +'InstanceProperties', +'InstanceReference', +'InstanceTemplate', +'InstanceTemplateList', +'InstanceTemplatesClient', +'InstanceWithNamedPorts', +'InstancesAddResourcePoliciesRequest', +'InstancesClient', +'InstancesGetEffectiveFirewallsResponse', +'InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy', +'InstancesRemoveResourcePoliciesRequest', +'InstancesScopedList', +'InstancesSetLabelsRequest', +'InstancesSetMachineResourcesRequest', +'InstancesSetMachineTypeRequest', +'InstancesSetMinCpuPlatformRequest', +'InstancesSetServiceAccountRequest', +'InstancesStartWithEncryptionKeyRequest', +'Int64RangeMatch', +'Interconnect', +'InterconnectAttachment', +'InterconnectAttachmentAggregatedList', +'InterconnectAttachmentList', +'InterconnectAttachmentPartnerMetadata', +'InterconnectAttachmentPrivateInfo', +'InterconnectAttachmentsClient', +'InterconnectAttachmentsScopedList', +'InterconnectCircuitInfo', +'InterconnectDiagnostics', +'InterconnectDiagnosticsARPEntry', +'InterconnectDiagnosticsLinkLACPStatus', +'InterconnectDiagnosticsLinkOpticalPower', +'InterconnectDiagnosticsLinkStatus', +'InterconnectList', +'InterconnectLocation', +'InterconnectLocationList', +'InterconnectLocationRegionInfo', +'InterconnectLocationsClient', +'InterconnectOutageNotification', +'InterconnectsClient', +'InterconnectsGetDiagnosticsResponse', +'InvalidateCacheUrlMapRequest', +'Items', +'License', +'LicenseCode', +'LicenseCodeLicenseAlias', +'LicenseCodesClient', +'LicenseResourceCommitment', +'LicenseResourceRequirements', +'LicensesClient', +'LicensesListResponse', +'ListAcceleratorTypesRequest', +'ListAddressesRequest', +'ListAssociationsFirewallPolicyRequest', +'ListAutoscalersRequest', +'ListAvailableFeaturesSslPoliciesRequest', 
+'ListBackendBucketsRequest', +'ListBackendServicesRequest', +'ListDiskTypesRequest', +'ListDisksRequest', +'ListErrorsInstanceGroupManagersRequest', +'ListErrorsRegionInstanceGroupManagersRequest', +'ListExternalVpnGatewaysRequest', +'ListFirewallPoliciesRequest', +'ListFirewallsRequest', +'ListForwardingRulesRequest', +'ListGlobalAddressesRequest', +'ListGlobalForwardingRulesRequest', +'ListGlobalNetworkEndpointGroupsRequest', +'ListGlobalOperationsRequest', +'ListGlobalOrganizationOperationsRequest', +'ListGlobalPublicDelegatedPrefixesRequest', +'ListHealthChecksRequest', +'ListImagesRequest', +'ListInstanceGroupManagersRequest', +'ListInstanceGroupsRequest', +'ListInstanceTemplatesRequest', +'ListInstancesInstanceGroupsRequest', +'ListInstancesRegionInstanceGroupsRequest', +'ListInstancesRequest', +'ListInterconnectAttachmentsRequest', +'ListInterconnectLocationsRequest', +'ListInterconnectsRequest', +'ListLicensesRequest', +'ListMachineTypesRequest', +'ListManagedInstancesInstanceGroupManagersRequest', +'ListManagedInstancesRegionInstanceGroupManagersRequest', +'ListNetworkEndpointGroupsRequest', +'ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest', +'ListNetworkEndpointsNetworkEndpointGroupsRequest', +'ListNetworksRequest', +'ListNodeGroupsRequest', +'ListNodeTemplatesRequest', +'ListNodeTypesRequest', +'ListNodesNodeGroupsRequest', +'ListPacketMirroringsRequest', +'ListPeeringRoutesNetworksRequest', +'ListPerInstanceConfigsInstanceGroupManagersRequest', +'ListPerInstanceConfigsRegionInstanceGroupManagersRequest', +'ListPreconfiguredExpressionSetsSecurityPoliciesRequest', +'ListPublicAdvertisedPrefixesRequest', +'ListPublicDelegatedPrefixesRequest', +'ListReferrersInstancesRequest', +'ListRegionAutoscalersRequest', +'ListRegionBackendServicesRequest', +'ListRegionCommitmentsRequest', +'ListRegionDiskTypesRequest', +'ListRegionDisksRequest', +'ListRegionHealthCheckServicesRequest', +'ListRegionHealthChecksRequest', 
+'ListRegionInstanceGroupManagersRequest', +'ListRegionInstanceGroupsRequest', +'ListRegionNetworkEndpointGroupsRequest', +'ListRegionNotificationEndpointsRequest', +'ListRegionOperationsRequest', +'ListRegionSslCertificatesRequest', +'ListRegionTargetHttpProxiesRequest', +'ListRegionTargetHttpsProxiesRequest', +'ListRegionUrlMapsRequest', +'ListRegionsRequest', +'ListReservationsRequest', +'ListResourcePoliciesRequest', +'ListRoutersRequest', +'ListRoutesRequest', +'ListSecurityPoliciesRequest', +'ListServiceAttachmentsRequest', +'ListSnapshotsRequest', +'ListSslCertificatesRequest', +'ListSslPoliciesRequest', +'ListSubnetworksRequest', +'ListTargetGrpcProxiesRequest', +'ListTargetHttpProxiesRequest', +'ListTargetHttpsProxiesRequest', +'ListTargetInstancesRequest', +'ListTargetPoolsRequest', +'ListTargetSslProxiesRequest', +'ListTargetTcpProxiesRequest', +'ListTargetVpnGatewaysRequest', +'ListUrlMapsRequest', +'ListUsableSubnetworksRequest', +'ListVpnGatewaysRequest', +'ListVpnTunnelsRequest', +'ListXpnHostsProjectsRequest', +'ListZoneOperationsRequest', +'ListZonesRequest', +'LocalDisk', +'LocationPolicy', +'LocationPolicyLocation', +'LogConfig', +'LogConfigCloudAuditOptions', +'LogConfigCounterOptions', +'LogConfigCounterOptionsCustomField', +'LogConfigDataAccessOptions', +'MachineType', +'MachineTypeAggregatedList', +'MachineTypeList', +'MachineTypesClient', +'MachineTypesScopedList', +'ManagedInstance', +'ManagedInstanceInstanceHealth', +'ManagedInstanceLastAttempt', +'ManagedInstanceVersion', +'Metadata', +'MetadataFilter', +'MetadataFilterLabelMatch', +'MoveDiskProjectRequest', +'MoveFirewallPolicyRequest', +'MoveInstanceProjectRequest', +'NamedPort', +'Network', +'NetworkEndpoint', +'NetworkEndpointGroup', +'NetworkEndpointGroupAggregatedList', +'NetworkEndpointGroupAppEngine', +'NetworkEndpointGroupCloudFunction', +'NetworkEndpointGroupCloudRun', +'NetworkEndpointGroupList', +'NetworkEndpointGroupsAttachEndpointsRequest', +'NetworkEndpointGroupsClient', 
+'NetworkEndpointGroupsDetachEndpointsRequest', +'NetworkEndpointGroupsListEndpointsRequest', +'NetworkEndpointGroupsListNetworkEndpoints', +'NetworkEndpointGroupsScopedList', +'NetworkEndpointWithHealthStatus', +'NetworkInterface', +'NetworkList', +'NetworkPeering', +'NetworkRoutingConfig', +'NetworksAddPeeringRequest', +'NetworksClient', +'NetworksGetEffectiveFirewallsResponse', +'NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy', +'NetworksRemovePeeringRequest', +'NetworksUpdatePeeringRequest', +'NodeGroup', +'NodeGroupAggregatedList', +'NodeGroupAutoscalingPolicy', +'NodeGroupList', +'NodeGroupMaintenanceWindow', +'NodeGroupNode', +'NodeGroupsAddNodesRequest', +'NodeGroupsClient', +'NodeGroupsDeleteNodesRequest', +'NodeGroupsListNodes', +'NodeGroupsScopedList', +'NodeGroupsSetNodeTemplateRequest', +'NodeTemplate', +'NodeTemplateAggregatedList', +'NodeTemplateList', +'NodeTemplateNodeTypeFlexibility', +'NodeTemplatesClient', +'NodeTemplatesScopedList', +'NodeType', +'NodeTypeAggregatedList', +'NodeTypeList', +'NodeTypesClient', +'NodeTypesScopedList', +'NotificationEndpoint', +'NotificationEndpointGrpcSettings', +'NotificationEndpointList', +'Operation', +'OperationAggregatedList', +'OperationList', +'OperationsScopedList', +'OutlierDetection', +'PacketMirroring', +'PacketMirroringAggregatedList', +'PacketMirroringFilter', +'PacketMirroringForwardingRuleInfo', +'PacketMirroringList', +'PacketMirroringMirroredResourceInfo', +'PacketMirroringMirroredResourceInfoInstanceInfo', +'PacketMirroringMirroredResourceInfoSubnetInfo', +'PacketMirroringNetworkInfo', +'PacketMirroringsClient', +'PacketMirroringsScopedList', +'PatchAutoscalerRequest', +'PatchBackendBucketRequest', +'PatchBackendServiceRequest', +'PatchFirewallPolicyRequest', +'PatchFirewallRequest', +'PatchForwardingRuleRequest', +'PatchGlobalForwardingRuleRequest', +'PatchGlobalPublicDelegatedPrefixeRequest', +'PatchHealthCheckRequest', +'PatchImageRequest', +'PatchInstanceGroupManagerRequest', 
+'PatchInterconnectAttachmentRequest', +'PatchInterconnectRequest', +'PatchNetworkRequest', +'PatchNodeGroupRequest', +'PatchPacketMirroringRequest', +'PatchPerInstanceConfigsInstanceGroupManagerRequest', +'PatchPerInstanceConfigsRegionInstanceGroupManagerRequest', +'PatchPublicAdvertisedPrefixeRequest', +'PatchPublicDelegatedPrefixeRequest', +'PatchRegionAutoscalerRequest', +'PatchRegionBackendServiceRequest', +'PatchRegionHealthCheckRequest', +'PatchRegionHealthCheckServiceRequest', +'PatchRegionInstanceGroupManagerRequest', +'PatchRegionUrlMapRequest', +'PatchRouterRequest', +'PatchRuleFirewallPolicyRequest', +'PatchRuleSecurityPolicyRequest', +'PatchSecurityPolicyRequest', +'PatchServiceAttachmentRequest', +'PatchSslPolicyRequest', +'PatchSubnetworkRequest', +'PatchTargetGrpcProxyRequest', +'PatchTargetHttpProxyRequest', +'PatchTargetHttpsProxyRequest', +'PatchUrlMapRequest', +'PathMatcher', +'PathRule', +'PerInstanceConfig', +'Policy', +'PreconfiguredWafSet', +'PreservedState', +'PreservedStatePreservedDisk', +'PreviewRouterRequest', +'Project', +'ProjectsClient', +'ProjectsDisableXpnResourceRequest', +'ProjectsEnableXpnResourceRequest', +'ProjectsGetXpnResources', +'ProjectsListXpnHostsRequest', +'ProjectsSetDefaultNetworkTierRequest', +'PublicAdvertisedPrefix', +'PublicAdvertisedPrefixList', +'PublicAdvertisedPrefixPublicDelegatedPrefix', +'PublicAdvertisedPrefixesClient', +'PublicDelegatedPrefix', +'PublicDelegatedPrefixAggregatedList', +'PublicDelegatedPrefixList', +'PublicDelegatedPrefixPublicDelegatedSubPrefix', +'PublicDelegatedPrefixesClient', +'PublicDelegatedPrefixesScopedList', +'Quota', +'RawDisk', +'RecreateInstancesInstanceGroupManagerRequest', +'RecreateInstancesRegionInstanceGroupManagerRequest', +'Reference', +'Region', +'RegionAutoscalerList', +'RegionAutoscalersClient', +'RegionBackendServicesClient', +'RegionCommitmentsClient', +'RegionDiskTypeList', +'RegionDiskTypesClient', +'RegionDisksAddResourcePoliciesRequest', +'RegionDisksClient', 
+'RegionDisksRemoveResourcePoliciesRequest', +'RegionDisksResizeRequest', +'RegionHealthCheckServicesClient', +'RegionHealthChecksClient', +'RegionInstanceGroupList', +'RegionInstanceGroupManagerDeleteInstanceConfigReq', +'RegionInstanceGroupManagerList', +'RegionInstanceGroupManagerPatchInstanceConfigReq', +'RegionInstanceGroupManagerUpdateInstanceConfigReq', +'RegionInstanceGroupManagersAbandonInstancesRequest', +'RegionInstanceGroupManagersApplyUpdatesRequest', +'RegionInstanceGroupManagersClient', +'RegionInstanceGroupManagersCreateInstancesRequest', +'RegionInstanceGroupManagersDeleteInstancesRequest', +'RegionInstanceGroupManagersListErrorsResponse', +'RegionInstanceGroupManagersListInstanceConfigsResp', +'RegionInstanceGroupManagersListInstancesResponse', +'RegionInstanceGroupManagersRecreateRequest', +'RegionInstanceGroupManagersSetTargetPoolsRequest', +'RegionInstanceGroupManagersSetTemplateRequest', +'RegionInstanceGroupsClient', +'RegionInstanceGroupsListInstances', +'RegionInstanceGroupsListInstancesRequest', +'RegionInstanceGroupsSetNamedPortsRequest', +'RegionInstancesClient', +'RegionList', +'RegionNetworkEndpointGroupsClient', +'RegionNotificationEndpointsClient', +'RegionOperationsClient', +'RegionSetLabelsRequest', +'RegionSetPolicyRequest', +'RegionSslCertificatesClient', +'RegionTargetHttpProxiesClient', +'RegionTargetHttpsProxiesClient', +'RegionTargetHttpsProxiesSetSslCertificatesRequest', +'RegionUrlMapsClient', +'RegionUrlMapsValidateRequest', +'RegionsClient', +'RemoveAssociationFirewallPolicyRequest', +'RemoveHealthCheckTargetPoolRequest', +'RemoveInstanceTargetPoolRequest', +'RemoveInstancesInstanceGroupRequest', +'RemovePeeringNetworkRequest', +'RemoveResourcePoliciesDiskRequest', +'RemoveResourcePoliciesInstanceRequest', +'RemoveResourcePoliciesRegionDiskRequest', +'RemoveRuleFirewallPolicyRequest', +'RemoveRuleSecurityPolicyRequest', +'RequestMirrorPolicy', +'Reservation', +'ReservationAffinity', +'ReservationAggregatedList', 
+'ReservationList', +'ReservationsClient', +'ReservationsResizeRequest', +'ReservationsScopedList', +'ResetInstanceRequest', +'ResizeDiskRequest', +'ResizeInstanceGroupManagerRequest', +'ResizeRegionDiskRequest', +'ResizeRegionInstanceGroupManagerRequest', +'ResizeReservationRequest', +'ResourceCommitment', +'ResourceGroupReference', +'ResourcePoliciesClient', +'ResourcePoliciesScopedList', +'ResourcePolicy', +'ResourcePolicyAggregatedList', +'ResourcePolicyDailyCycle', +'ResourcePolicyGroupPlacementPolicy', +'ResourcePolicyHourlyCycle', +'ResourcePolicyInstanceSchedulePolicy', +'ResourcePolicyInstanceSchedulePolicySchedule', +'ResourcePolicyList', +'ResourcePolicyResourceStatus', +'ResourcePolicyResourceStatusInstanceSchedulePolicyStatus', +'ResourcePolicySnapshotSchedulePolicy', +'ResourcePolicySnapshotSchedulePolicyRetentionPolicy', +'ResourcePolicySnapshotSchedulePolicySchedule', +'ResourcePolicySnapshotSchedulePolicySnapshotProperties', +'ResourcePolicyWeeklyCycle', +'ResourcePolicyWeeklyCycleDayOfWeek', +'Route', +'RouteAsPath', +'RouteList', +'Router', +'RouterAdvertisedIpRange', +'RouterAggregatedList', +'RouterBgp', +'RouterBgpPeer', +'RouterBgpPeerBfd', +'RouterInterface', +'RouterList', +'RouterNat', +'RouterNatLogConfig', +'RouterNatRule', +'RouterNatRuleAction', +'RouterNatSubnetworkToNat', +'RouterStatus', +'RouterStatusBgpPeerStatus', +'RouterStatusNatStatus', +'RouterStatusNatStatusNatRuleStatus', +'RouterStatusResponse', +'RoutersClient', +'RoutersPreviewResponse', +'RoutersScopedList', +'RoutesClient', +'Rule', +'SSLHealthCheck', +'ScalingScheduleStatus', +'Scheduling', +'SchedulingNodeAffinity', +'ScratchDisks', +'Screenshot', +'SecurityPoliciesClient', +'SecurityPoliciesListPreconfiguredExpressionSetsResponse', +'SecurityPoliciesWafConfig', +'SecurityPolicy', +'SecurityPolicyAdaptiveProtectionConfig', +'SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig', +'SecurityPolicyAdvancedOptionsConfig', +'SecurityPolicyList', 
+'SecurityPolicyReference', +'SecurityPolicyRule', +'SecurityPolicyRuleMatcher', +'SecurityPolicyRuleMatcherConfig', +'SecuritySettings', +'SendDiagnosticInterruptInstanceRequest', +'SendDiagnosticInterruptInstanceResponse', +'SerialPortOutput', +'ServerBinding', +'ServiceAccount', +'ServiceAttachment', +'ServiceAttachmentAggregatedList', +'ServiceAttachmentConnectedEndpoint', +'ServiceAttachmentConsumerProjectLimit', +'ServiceAttachmentList', +'ServiceAttachmentsClient', +'ServiceAttachmentsScopedList', +'SetBackendServiceTargetSslProxyRequest', +'SetBackendServiceTargetTcpProxyRequest', +'SetBackupTargetPoolRequest', +'SetCommonInstanceMetadataProjectRequest', +'SetDefaultNetworkTierProjectRequest', +'SetDeletionProtectionInstanceRequest', +'SetDiskAutoDeleteInstanceRequest', +'SetIamPolicyDiskRequest', +'SetIamPolicyFirewallPolicyRequest', +'SetIamPolicyImageRequest', +'SetIamPolicyInstanceRequest', +'SetIamPolicyInstanceTemplateRequest', +'SetIamPolicyLicenseRequest', +'SetIamPolicyNodeGroupRequest', +'SetIamPolicyNodeTemplateRequest', +'SetIamPolicyRegionDiskRequest', +'SetIamPolicyReservationRequest', +'SetIamPolicyResourcePolicyRequest', +'SetIamPolicyServiceAttachmentRequest', +'SetIamPolicySnapshotRequest', +'SetIamPolicySubnetworkRequest', +'SetInstanceTemplateInstanceGroupManagerRequest', +'SetInstanceTemplateRegionInstanceGroupManagerRequest', +'SetLabelsDiskRequest', +'SetLabelsExternalVpnGatewayRequest', +'SetLabelsForwardingRuleRequest', +'SetLabelsGlobalForwardingRuleRequest', +'SetLabelsImageRequest', +'SetLabelsInstanceRequest', +'SetLabelsRegionDiskRequest', +'SetLabelsSnapshotRequest', +'SetLabelsVpnGatewayRequest', +'SetMachineResourcesInstanceRequest', +'SetMachineTypeInstanceRequest', +'SetMetadataInstanceRequest', +'SetMinCpuPlatformInstanceRequest', +'SetNamedPortsInstanceGroupRequest', +'SetNamedPortsRegionInstanceGroupRequest', +'SetNodeTemplateNodeGroupRequest', +'SetPrivateIpGoogleAccessSubnetworkRequest', 
+'SetProxyHeaderTargetSslProxyRequest', +'SetProxyHeaderTargetTcpProxyRequest', +'SetQuicOverrideTargetHttpsProxyRequest', +'SetSchedulingInstanceRequest', +'SetSecurityPolicyBackendServiceRequest', +'SetServiceAccountInstanceRequest', +'SetShieldedInstanceIntegrityPolicyInstanceRequest', +'SetSslCertificatesRegionTargetHttpsProxyRequest', +'SetSslCertificatesTargetHttpsProxyRequest', +'SetSslCertificatesTargetSslProxyRequest', +'SetSslPolicyTargetHttpsProxyRequest', +'SetSslPolicyTargetSslProxyRequest', +'SetTagsInstanceRequest', +'SetTargetForwardingRuleRequest', +'SetTargetGlobalForwardingRuleRequest', +'SetTargetPoolsInstanceGroupManagerRequest', +'SetTargetPoolsRegionInstanceGroupManagerRequest', +'SetUrlMapRegionTargetHttpProxyRequest', +'SetUrlMapRegionTargetHttpsProxyRequest', +'SetUrlMapTargetHttpProxyRequest', +'SetUrlMapTargetHttpsProxyRequest', +'SetUsageExportBucketProjectRequest', +'ShieldedInstanceConfig', +'ShieldedInstanceIdentity', +'ShieldedInstanceIdentityEntry', +'ShieldedInstanceIntegrityPolicy', +'SignedUrlKey', +'SimulateMaintenanceEventInstanceRequest', +'Snapshot', +'SnapshotList', +'SnapshotsClient', +'SourceInstanceParams', +'SslCertificate', +'SslCertificateAggregatedList', +'SslCertificateList', +'SslCertificateManagedSslCertificate', +'SslCertificateSelfManagedSslCertificate', +'SslCertificatesClient', +'SslCertificatesScopedList', +'SslPoliciesClient', +'SslPoliciesList', +'SslPoliciesListAvailableFeaturesResponse', +'SslPolicy', +'SslPolicyReference', +'StartInstanceRequest', +'StartWithEncryptionKeyInstanceRequest', +'StatefulPolicy', +'StatefulPolicyPreservedState', +'StatefulPolicyPreservedStateDiskDevice', +'StopInstanceRequest', +'Subnetwork', +'SubnetworkAggregatedList', +'SubnetworkList', +'SubnetworkLogConfig', +'SubnetworkSecondaryRange', +'SubnetworksClient', +'SubnetworksExpandIpCidrRangeRequest', +'SubnetworksScopedList', +'SubnetworksSetPrivateIpGoogleAccessRequest', +'Subsetting', +'SwitchToCustomModeNetworkRequest', 
+'TCPHealthCheck', +'Tags', +'TargetGrpcProxiesClient', +'TargetGrpcProxy', +'TargetGrpcProxyList', +'TargetHttpProxiesClient', +'TargetHttpProxiesScopedList', +'TargetHttpProxy', +'TargetHttpProxyAggregatedList', +'TargetHttpProxyList', +'TargetHttpsProxiesClient', +'TargetHttpsProxiesScopedList', +'TargetHttpsProxiesSetQuicOverrideRequest', +'TargetHttpsProxiesSetSslCertificatesRequest', +'TargetHttpsProxy', +'TargetHttpsProxyAggregatedList', +'TargetHttpsProxyList', +'TargetInstance', +'TargetInstanceAggregatedList', +'TargetInstanceList', +'TargetInstancesClient', +'TargetInstancesScopedList', +'TargetPool', +'TargetPoolAggregatedList', +'TargetPoolInstanceHealth', +'TargetPoolList', +'TargetPoolsAddHealthCheckRequest', +'TargetPoolsAddInstanceRequest', +'TargetPoolsClient', +'TargetPoolsRemoveHealthCheckRequest', +'TargetPoolsRemoveInstanceRequest', +'TargetPoolsScopedList', +'TargetReference', +'TargetSslProxiesClient', +'TargetSslProxiesSetBackendServiceRequest', +'TargetSslProxiesSetProxyHeaderRequest', +'TargetSslProxiesSetSslCertificatesRequest', +'TargetSslProxy', +'TargetSslProxyList', +'TargetTcpProxiesClient', +'TargetTcpProxiesSetBackendServiceRequest', +'TargetTcpProxiesSetProxyHeaderRequest', +'TargetTcpProxy', +'TargetTcpProxyList', +'TargetVpnGateway', +'TargetVpnGatewayAggregatedList', +'TargetVpnGatewayList', +'TargetVpnGatewaysClient', +'TargetVpnGatewaysScopedList', +'TestFailure', +'TestIamPermissionsDiskRequest', +'TestIamPermissionsExternalVpnGatewayRequest', +'TestIamPermissionsFirewallPolicyRequest', +'TestIamPermissionsImageRequest', +'TestIamPermissionsInstanceRequest', +'TestIamPermissionsInstanceTemplateRequest', +'TestIamPermissionsLicenseCodeRequest', +'TestIamPermissionsLicenseRequest', +'TestIamPermissionsNetworkEndpointGroupRequest', +'TestIamPermissionsNodeGroupRequest', +'TestIamPermissionsNodeTemplateRequest', +'TestIamPermissionsPacketMirroringRequest', +'TestIamPermissionsRegionDiskRequest', 
+'TestIamPermissionsReservationRequest', +'TestIamPermissionsResourcePolicyRequest', +'TestIamPermissionsServiceAttachmentRequest', +'TestIamPermissionsSnapshotRequest', +'TestIamPermissionsSubnetworkRequest', +'TestIamPermissionsVpnGatewayRequest', +'TestPermissionsRequest', +'TestPermissionsResponse', +'Uint128', +'UpdateAccessConfigInstanceRequest', +'UpdateAutoscalerRequest', +'UpdateBackendBucketRequest', +'UpdateBackendServiceRequest', +'UpdateDisplayDeviceInstanceRequest', +'UpdateFirewallRequest', +'UpdateHealthCheckRequest', +'UpdateInstanceRequest', +'UpdateNetworkInterfaceInstanceRequest', +'UpdatePeeringNetworkRequest', +'UpdatePerInstanceConfigsInstanceGroupManagerRequest', +'UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest', +'UpdateRegionAutoscalerRequest', +'UpdateRegionBackendServiceRequest', +'UpdateRegionHealthCheckRequest', +'UpdateRegionUrlMapRequest', +'UpdateRouterRequest', +'UpdateShieldedInstanceConfigInstanceRequest', +'UpdateUrlMapRequest', +'UrlMap', +'UrlMapList', +'UrlMapReference', +'UrlMapTest', +'UrlMapTestHeader', +'UrlMapValidationResult', +'UrlMapsAggregatedList', +'UrlMapsClient', +'UrlMapsScopedList', +'UrlMapsValidateRequest', +'UrlMapsValidateResponse', +'UrlRewrite', +'UsableSubnetwork', +'UsableSubnetworkSecondaryRange', +'UsableSubnetworksAggregatedList', +'UsageExportLocation', +'ValidateRegionUrlMapRequest', +'ValidateUrlMapRequest', +'VmEndpointNatMappings', +'VmEndpointNatMappingsInterfaceNatMappings', +'VmEndpointNatMappingsList', +'VpnGateway', +'VpnGatewayAggregatedList', +'VpnGatewayList', +'VpnGatewayStatus', +'VpnGatewayStatusHighAvailabilityRequirementState', +'VpnGatewayStatusTunnel', +'VpnGatewayStatusVpnConnection', +'VpnGatewayVpnGatewayInterface', +'VpnGatewaysClient', +'VpnGatewaysGetStatusResponse', +'VpnGatewaysScopedList', +'VpnTunnel', +'VpnTunnelAggregatedList', +'VpnTunnelList', +'VpnTunnelsClient', +'VpnTunnelsScopedList', +'WafExpressionSet', +'WafExpressionSetExpression', 
+'WaitGlobalOperationRequest', +'WaitRegionOperationRequest', +'WaitZoneOperationRequest', +'Warning', +'Warnings', +'WeightedBackendService', +'XpnHostList', +'XpnResourceId', +'Zone', +'ZoneList', +'ZoneOperationsClient', +'ZoneSetLabelsRequest', +'ZoneSetPolicyRequest', +'ZonesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/gapic_metadata.json b/owl-bot-staging/v1/google/cloud/compute_v1/gapic_metadata.json new file mode 100644 index 000000000..f3a3c57c6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/gapic_metadata.json @@ -0,0 +1,3654 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.compute_v1", + "protoPackage": "google.cloud.compute.v1", + "schema": "1.0", + "services": { + "AcceleratorTypes": { + "clients": { + "rest": { + "libraryClient": "AcceleratorTypesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "Addresses": { + "clients": { + "rest": { + "libraryClient": "AddressesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "Autoscalers": { + "clients": { + "rest": { + "libraryClient": "AutoscalersClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "BackendBuckets": { + "clients": { + "rest": { + 
"libraryClient": "BackendBucketsClient", + "rpcs": { + "AddSignedUrlKey": { + "methods": [ + "add_signed_url_key" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DeleteSignedUrlKey": { + "methods": [ + "delete_signed_url_key" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "BackendServices": { + "clients": { + "rest": { + "libraryClient": "BackendServicesClient", + "rpcs": { + "AddSignedUrlKey": { + "methods": [ + "add_signed_url_key" + ] + }, + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DeleteSignedUrlKey": { + "methods": [ + "delete_signed_url_key" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetHealth": { + "methods": [ + "get_health" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetSecurityPolicy": { + "methods": [ + "set_security_policy" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "DiskTypes": { + "clients": { + "rest": { + "libraryClient": "DiskTypesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "Disks": { + "clients": { + "rest": { + "libraryClient": "DisksClient", + "rpcs": { + "AddResourcePolicies": { + "methods": [ + "add_resource_policies" + ] + }, + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "CreateSnapshot": { + "methods": [ + "create_snapshot" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] 
+ }, + "List": { + "methods": [ + "list" + ] + }, + "RemoveResourcePolicies": { + "methods": [ + "remove_resource_policies" + ] + }, + "Resize": { + "methods": [ + "resize" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "ExternalVpnGateways": { + "clients": { + "rest": { + "libraryClient": "ExternalVpnGatewaysClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "FirewallPolicies": { + "clients": { + "rest": { + "libraryClient": "FirewallPoliciesClient", + "rpcs": { + "AddAssociation": { + "methods": [ + "add_association" + ] + }, + "AddRule": { + "methods": [ + "add_rule" + ] + }, + "CloneRules": { + "methods": [ + "clone_rules" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetAssociation": { + "methods": [ + "get_association" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetRule": { + "methods": [ + "get_rule" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListAssociations": { + "methods": [ + "list_associations" + ] + }, + "Move": { + "methods": [ + "move" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "PatchRule": { + "methods": [ + "patch_rule" + ] + }, + "RemoveAssociation": { + "methods": [ + "remove_association" + ] + }, + "RemoveRule": { + "methods": [ + "remove_rule" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + 
"Firewalls": { + "clients": { + "rest": { + "libraryClient": "FirewallsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "ForwardingRules": { + "clients": { + "rest": { + "libraryClient": "ForwardingRulesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "SetTarget": { + "methods": [ + "set_target" + ] + } + } + } + } + }, + "GlobalAddresses": { + "clients": { + "rest": { + "libraryClient": "GlobalAddressesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "GlobalForwardingRules": { + "clients": { + "rest": { + "libraryClient": "GlobalForwardingRulesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "SetTarget": { + "methods": [ + "set_target" + ] + } + } + } + } + }, + "GlobalNetworkEndpointGroups": { + "clients": { + "rest": { + "libraryClient": "GlobalNetworkEndpointGroupsClient", + "rpcs": { + "AttachNetworkEndpoints": { + "methods": [ + "attach_network_endpoints" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DetachNetworkEndpoints": 
{ + "methods": [ + "detach_network_endpoints" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListNetworkEndpoints": { + "methods": [ + "list_network_endpoints" + ] + } + } + } + } + }, + "GlobalOperations": { + "clients": { + "rest": { + "libraryClient": "GlobalOperationsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Wait": { + "methods": [ + "wait" + ] + } + } + } + } + }, + "GlobalOrganizationOperations": { + "clients": { + "rest": { + "libraryClient": "GlobalOrganizationOperationsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "GlobalPublicDelegatedPrefixes": { + "clients": { + "rest": { + "libraryClient": "GlobalPublicDelegatedPrefixesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "HealthChecks": { + "clients": { + "rest": { + "libraryClient": "HealthChecksClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "ImageFamilyViews": { + "clients": { + "rest": { + "libraryClient": "ImageFamilyViewsClient", + "rpcs": { + "Get": { + "methods": [ + "get" + ] + } + } + } + } + }, + "Images": { + "clients": { + "rest": { 
+ "libraryClient": "ImagesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Deprecate": { + "methods": [ + "deprecate" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetFromFamily": { + "methods": [ + "get_from_family" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "InstanceGroupManagers": { + "clients": { + "rest": { + "libraryClient": "InstanceGroupManagersClient", + "rpcs": { + "AbandonInstances": { + "methods": [ + "abandon_instances" + ] + }, + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "ApplyUpdatesToInstances": { + "methods": [ + "apply_updates_to_instances" + ] + }, + "CreateInstances": { + "methods": [ + "create_instances" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DeleteInstances": { + "methods": [ + "delete_instances" + ] + }, + "DeletePerInstanceConfigs": { + "methods": [ + "delete_per_instance_configs" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListErrors": { + "methods": [ + "list_errors" + ] + }, + "ListManagedInstances": { + "methods": [ + "list_managed_instances" + ] + }, + "ListPerInstanceConfigs": { + "methods": [ + "list_per_instance_configs" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "PatchPerInstanceConfigs": { + "methods": [ + "patch_per_instance_configs" + ] + }, + "RecreateInstances": { + "methods": [ + "recreate_instances" + ] + }, + "Resize": { + "methods": [ + "resize" + ] + }, + "SetInstanceTemplate": { + "methods": [ + "set_instance_template" + ] + }, + "SetTargetPools": { + 
"methods": [ + "set_target_pools" + ] + }, + "UpdatePerInstanceConfigs": { + "methods": [ + "update_per_instance_configs" + ] + } + } + } + } + }, + "InstanceGroups": { + "clients": { + "rest": { + "libraryClient": "InstanceGroupsClient", + "rpcs": { + "AddInstances": { + "methods": [ + "add_instances" + ] + }, + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "RemoveInstances": { + "methods": [ + "remove_instances" + ] + }, + "SetNamedPorts": { + "methods": [ + "set_named_ports" + ] + } + } + } + } + }, + "InstanceTemplates": { + "clients": { + "rest": { + "libraryClient": "InstanceTemplatesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "Instances": { + "clients": { + "rest": { + "libraryClient": "InstancesClient", + "rpcs": { + "AddAccessConfig": { + "methods": [ + "add_access_config" + ] + }, + "AddResourcePolicies": { + "methods": [ + "add_resource_policies" + ] + }, + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "AttachDisk": { + "methods": [ + "attach_disk" + ] + }, + "BulkInsert": { + "methods": [ + "bulk_insert" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DeleteAccessConfig": { + "methods": [ + "delete_access_config" + ] + }, + "DetachDisk": { + "methods": [ + "detach_disk" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetEffectiveFirewalls": { + "methods": [ + 
"get_effective_firewalls" + ] + }, + "GetGuestAttributes": { + "methods": [ + "get_guest_attributes" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetScreenshot": { + "methods": [ + "get_screenshot" + ] + }, + "GetSerialPortOutput": { + "methods": [ + "get_serial_port_output" + ] + }, + "GetShieldedInstanceIdentity": { + "methods": [ + "get_shielded_instance_identity" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListReferrers": { + "methods": [ + "list_referrers" + ] + }, + "RemoveResourcePolicies": { + "methods": [ + "remove_resource_policies" + ] + }, + "Reset": { + "methods": [ + "reset" + ] + }, + "SendDiagnosticInterrupt": { + "methods": [ + "send_diagnostic_interrupt" + ] + }, + "SetDeletionProtection": { + "methods": [ + "set_deletion_protection" + ] + }, + "SetDiskAutoDelete": { + "methods": [ + "set_disk_auto_delete" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "SetMachineResources": { + "methods": [ + "set_machine_resources" + ] + }, + "SetMachineType": { + "methods": [ + "set_machine_type" + ] + }, + "SetMetadata": { + "methods": [ + "set_metadata" + ] + }, + "SetMinCpuPlatform": { + "methods": [ + "set_min_cpu_platform" + ] + }, + "SetScheduling": { + "methods": [ + "set_scheduling" + ] + }, + "SetServiceAccount": { + "methods": [ + "set_service_account" + ] + }, + "SetShieldedInstanceIntegrityPolicy": { + "methods": [ + "set_shielded_instance_integrity_policy" + ] + }, + "SetTags": { + "methods": [ + "set_tags" + ] + }, + "SimulateMaintenanceEvent": { + "methods": [ + "simulate_maintenance_event" + ] + }, + "Start": { + "methods": [ + "start" + ] + }, + "StartWithEncryptionKey": { + "methods": [ + "start_with_encryption_key" + ] + }, + "Stop": { + "methods": [ + "stop" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "Update": { + "methods": [ 
+ "update" + ] + }, + "UpdateAccessConfig": { + "methods": [ + "update_access_config" + ] + }, + "UpdateDisplayDevice": { + "methods": [ + "update_display_device" + ] + }, + "UpdateNetworkInterface": { + "methods": [ + "update_network_interface" + ] + }, + "UpdateShieldedInstanceConfig": { + "methods": [ + "update_shielded_instance_config" + ] + } + } + } + } + }, + "InterconnectAttachments": { + "clients": { + "rest": { + "libraryClient": "InterconnectAttachmentsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "InterconnectLocations": { + "clients": { + "rest": { + "libraryClient": "InterconnectLocationsClient", + "rpcs": { + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "Interconnects": { + "clients": { + "rest": { + "libraryClient": "InterconnectsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetDiagnostics": { + "methods": [ + "get_diagnostics" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "LicenseCodes": { + "clients": { + "rest": { + "libraryClient": "LicenseCodesClient", + "rpcs": { + "Get": { + "methods": [ + "get" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "Licenses": { + "clients": { + "rest": { + "libraryClient": "LicensesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ 
+ "list" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "MachineTypes": { + "clients": { + "rest": { + "libraryClient": "MachineTypesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "NetworkEndpointGroups": { + "clients": { + "rest": { + "libraryClient": "NetworkEndpointGroupsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "AttachNetworkEndpoints": { + "methods": [ + "attach_network_endpoints" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DetachNetworkEndpoints": { + "methods": [ + "detach_network_endpoints" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListNetworkEndpoints": { + "methods": [ + "list_network_endpoints" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "Networks": { + "clients": { + "rest": { + "libraryClient": "NetworksClient", + "rpcs": { + "AddPeering": { + "methods": [ + "add_peering" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetEffectiveFirewalls": { + "methods": [ + "get_effective_firewalls" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListPeeringRoutes": { + "methods": [ + "list_peering_routes" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "RemovePeering": { + "methods": [ + "remove_peering" + ] + }, + "SwitchToCustomMode": { + "methods": [ + "switch_to_custom_mode" + ] + }, + "UpdatePeering": { + "methods": [ + "update_peering" + ] + } + } + } + } + }, + "NodeGroups": { + "clients": { + "rest": { + "libraryClient": "NodeGroupsClient", + "rpcs": 
{ + "AddNodes": { + "methods": [ + "add_nodes" + ] + }, + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DeleteNodes": { + "methods": [ + "delete_nodes" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListNodes": { + "methods": [ + "list_nodes" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SetNodeTemplate": { + "methods": [ + "set_node_template" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "NodeTemplates": { + "clients": { + "rest": { + "libraryClient": "NodeTemplatesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "NodeTypes": { + "clients": { + "rest": { + "libraryClient": "NodeTypesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "PacketMirrorings": { + "clients": { + "rest": { + "libraryClient": "PacketMirroringsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, 
+ "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "Projects": { + "clients": { + "rest": { + "libraryClient": "ProjectsClient", + "rpcs": { + "DisableXpnHost": { + "methods": [ + "disable_xpn_host" + ] + }, + "DisableXpnResource": { + "methods": [ + "disable_xpn_resource" + ] + }, + "EnableXpnHost": { + "methods": [ + "enable_xpn_host" + ] + }, + "EnableXpnResource": { + "methods": [ + "enable_xpn_resource" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetXpnHost": { + "methods": [ + "get_xpn_host" + ] + }, + "GetXpnResources": { + "methods": [ + "get_xpn_resources" + ] + }, + "ListXpnHosts": { + "methods": [ + "list_xpn_hosts" + ] + }, + "MoveDisk": { + "methods": [ + "move_disk" + ] + }, + "MoveInstance": { + "methods": [ + "move_instance" + ] + }, + "SetCommonInstanceMetadata": { + "methods": [ + "set_common_instance_metadata" + ] + }, + "SetDefaultNetworkTier": { + "methods": [ + "set_default_network_tier" + ] + }, + "SetUsageExportBucket": { + "methods": [ + "set_usage_export_bucket" + ] + } + } + } + } + }, + "PublicAdvertisedPrefixes": { + "clients": { + "rest": { + "libraryClient": "PublicAdvertisedPrefixesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "PublicDelegatedPrefixes": { + "clients": { + "rest": { + "libraryClient": "PublicDelegatedPrefixesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "RegionAutoscalers": { + "clients": { + "rest": { + "libraryClient": "RegionAutoscalersClient", + "rpcs": 
{ + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "RegionBackendServices": { + "clients": { + "rest": { + "libraryClient": "RegionBackendServicesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetHealth": { + "methods": [ + "get_health" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "RegionCommitments": { + "clients": { + "rest": { + "libraryClient": "RegionCommitmentsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "RegionDiskTypes": { + "clients": { + "rest": { + "libraryClient": "RegionDiskTypesClient", + "rpcs": { + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "RegionDisks": { + "clients": { + "rest": { + "libraryClient": "RegionDisksClient", + "rpcs": { + "AddResourcePolicies": { + "methods": [ + "add_resource_policies" + ] + }, + "CreateSnapshot": { + "methods": [ + "create_snapshot" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "RemoveResourcePolicies": { + "methods": [ + "remove_resource_policies" + ] + }, + "Resize": { + "methods": [ + "resize" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + 
"SetLabels": { + "methods": [ + "set_labels" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "RegionHealthCheckServices": { + "clients": { + "rest": { + "libraryClient": "RegionHealthCheckServicesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "RegionHealthChecks": { + "clients": { + "rest": { + "libraryClient": "RegionHealthChecksClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "RegionInstanceGroupManagers": { + "clients": { + "rest": { + "libraryClient": "RegionInstanceGroupManagersClient", + "rpcs": { + "AbandonInstances": { + "methods": [ + "abandon_instances" + ] + }, + "ApplyUpdatesToInstances": { + "methods": [ + "apply_updates_to_instances" + ] + }, + "CreateInstances": { + "methods": [ + "create_instances" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "DeleteInstances": { + "methods": [ + "delete_instances" + ] + }, + "DeletePerInstanceConfigs": { + "methods": [ + "delete_per_instance_configs" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListErrors": { + "methods": [ + "list_errors" + ] + }, + "ListManagedInstances": { + "methods": [ + "list_managed_instances" + ] + }, + "ListPerInstanceConfigs": { + "methods": [ + "list_per_instance_configs" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "PatchPerInstanceConfigs": { + "methods": [ + "patch_per_instance_configs" + ] + }, + 
"RecreateInstances": { + "methods": [ + "recreate_instances" + ] + }, + "Resize": { + "methods": [ + "resize" + ] + }, + "SetInstanceTemplate": { + "methods": [ + "set_instance_template" + ] + }, + "SetTargetPools": { + "methods": [ + "set_target_pools" + ] + }, + "UpdatePerInstanceConfigs": { + "methods": [ + "update_per_instance_configs" + ] + } + } + } + } + }, + "RegionInstanceGroups": { + "clients": { + "rest": { + "libraryClient": "RegionInstanceGroupsClient", + "rpcs": { + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "SetNamedPorts": { + "methods": [ + "set_named_ports" + ] + } + } + } + } + }, + "RegionInstances": { + "clients": { + "rest": { + "libraryClient": "RegionInstancesClient", + "rpcs": { + "BulkInsert": { + "methods": [ + "bulk_insert" + ] + } + } + } + } + }, + "RegionNetworkEndpointGroups": { + "clients": { + "rest": { + "libraryClient": "RegionNetworkEndpointGroupsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "RegionNotificationEndpoints": { + "clients": { + "rest": { + "libraryClient": "RegionNotificationEndpointsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "RegionOperations": { + "clients": { + "rest": { + "libraryClient": "RegionOperationsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Wait": { + "methods": [ + "wait" + ] + } + } + } + } + }, + "RegionSslCertificates": { + "clients": { + "rest": { + "libraryClient": "RegionSslCertificatesClient", + "rpcs": { + "Delete": { + 
"methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "RegionTargetHttpProxies": { + "clients": { + "rest": { + "libraryClient": "RegionTargetHttpProxiesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetUrlMap": { + "methods": [ + "set_url_map" + ] + } + } + } + } + }, + "RegionTargetHttpsProxies": { + "clients": { + "rest": { + "libraryClient": "RegionTargetHttpsProxiesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetSslCertificates": { + "methods": [ + "set_ssl_certificates" + ] + }, + "SetUrlMap": { + "methods": [ + "set_url_map" + ] + } + } + } + } + }, + "RegionUrlMaps": { + "clients": { + "rest": { + "libraryClient": "RegionUrlMapsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + }, + "Validate": { + "methods": [ + "validate" + ] + } + } + } + } + }, + "Regions": { + "clients": { + "rest": { + "libraryClient": "RegionsClient", + "rpcs": { + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "Reservations": { + "clients": { + "rest": { + "libraryClient": "ReservationsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + 
"Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Resize": { + "methods": [ + "resize" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "ResourcePolicies": { + "clients": { + "rest": { + "libraryClient": "ResourcePoliciesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "Routers": { + "clients": { + "rest": { + "libraryClient": "RoutersClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetNatMappingInfo": { + "methods": [ + "get_nat_mapping_info" + ] + }, + "GetRouterStatus": { + "methods": [ + "get_router_status" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Preview": { + "methods": [ + "preview" + ] + }, + "Update": { + "methods": [ + "update" + ] + } + } + } + } + }, + "Routes": { + "clients": { + "rest": { + "libraryClient": "RoutesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "SecurityPolicies": { + "clients": { + "rest": { + "libraryClient": "SecurityPoliciesClient", + "rpcs": { + "AddRule": { + "methods": [ + "add_rule" + ] + }, + "Delete": { + "methods": [ 
+ "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetRule": { + "methods": [ + "get_rule" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListPreconfiguredExpressionSets": { + "methods": [ + "list_preconfigured_expression_sets" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "PatchRule": { + "methods": [ + "patch_rule" + ] + }, + "RemoveRule": { + "methods": [ + "remove_rule" + ] + } + } + } + } + }, + "ServiceAttachments": { + "clients": { + "rest": { + "libraryClient": "ServiceAttachmentsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "Snapshots": { + "clients": { + "rest": { + "libraryClient": "SnapshotsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "SslCertificates": { + "clients": { + "rest": { + "libraryClient": "SslCertificatesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + 
"SslPolicies": { + "clients": { + "rest": { + "libraryClient": "SslPoliciesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListAvailableFeatures": { + "methods": [ + "list_available_features" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "Subnetworks": { + "clients": { + "rest": { + "libraryClient": "SubnetworksClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "ExpandIpCidrRange": { + "methods": [ + "expand_ip_cidr_range" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "ListUsable": { + "methods": [ + "list_usable" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SetPrivateIpGoogleAccess": { + "methods": [ + "set_private_ip_google_access" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "TargetGrpcProxies": { + "clients": { + "rest": { + "libraryClient": "TargetGrpcProxiesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + } + } + } + } + }, + "TargetHttpProxies": { + "clients": { + "rest": { + "libraryClient": "TargetHttpProxiesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + 
"methods": [ + "patch" + ] + }, + "SetUrlMap": { + "methods": [ + "set_url_map" + ] + } + } + } + } + }, + "TargetHttpsProxies": { + "clients": { + "rest": { + "libraryClient": "TargetHttpsProxiesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "SetQuicOverride": { + "methods": [ + "set_quic_override" + ] + }, + "SetSslCertificates": { + "methods": [ + "set_ssl_certificates" + ] + }, + "SetSslPolicy": { + "methods": [ + "set_ssl_policy" + ] + }, + "SetUrlMap": { + "methods": [ + "set_url_map" + ] + } + } + } + } + }, + "TargetInstances": { + "clients": { + "rest": { + "libraryClient": "TargetInstancesClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "TargetPools": { + "clients": { + "rest": { + "libraryClient": "TargetPoolsClient", + "rpcs": { + "AddHealthCheck": { + "methods": [ + "add_health_check" + ] + }, + "AddInstance": { + "methods": [ + "add_instance" + ] + }, + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetHealth": { + "methods": [ + "get_health" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "RemoveHealthCheck": { + "methods": [ + "remove_health_check" + ] + }, + "RemoveInstance": { + "methods": [ + "remove_instance" + ] + }, + "SetBackup": { + "methods": [ + "set_backup" + ] + } + } + } + } + }, + "TargetSslProxies": { + "clients": { + "rest": { + "libraryClient": 
"TargetSslProxiesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetBackendService": { + "methods": [ + "set_backend_service" + ] + }, + "SetProxyHeader": { + "methods": [ + "set_proxy_header" + ] + }, + "SetSslCertificates": { + "methods": [ + "set_ssl_certificates" + ] + }, + "SetSslPolicy": { + "methods": [ + "set_ssl_policy" + ] + } + } + } + } + }, + "TargetTcpProxies": { + "clients": { + "rest": { + "libraryClient": "TargetTcpProxiesClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetBackendService": { + "methods": [ + "set_backend_service" + ] + }, + "SetProxyHeader": { + "methods": [ + "set_proxy_header" + ] + } + } + } + } + }, + "TargetVpnGateways": { + "clients": { + "rest": { + "libraryClient": "TargetVpnGatewaysClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "UrlMaps": { + "clients": { + "rest": { + "libraryClient": "UrlMapsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "InvalidateCache": { + "methods": [ + "invalidate_cache" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Patch": { + "methods": [ + "patch" + ] + }, + "Update": { + "methods": [ + "update" + ] + }, + "Validate": { + "methods": [ + "validate" + ] + } + } + } + } + }, + "VpnGateways": { + "clients": { + "rest": { + "libraryClient": 
"VpnGatewaysClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "GetStatus": { + "methods": [ + "get_status" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "SetLabels": { + "methods": [ + "set_labels" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + } + } + } + } + }, + "VpnTunnels": { + "clients": { + "rest": { + "libraryClient": "VpnTunnelsClient", + "rpcs": { + "AggregatedList": { + "methods": [ + "aggregated_list" + ] + }, + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "Insert": { + "methods": [ + "insert" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + }, + "ZoneOperations": { + "clients": { + "rest": { + "libraryClient": "ZoneOperationsClient", + "rpcs": { + "Delete": { + "methods": [ + "delete" + ] + }, + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + }, + "Wait": { + "methods": [ + "wait" + ] + } + } + } + } + }, + "Zones": { + "clients": { + "rest": { + "libraryClient": "ZonesClient", + "rpcs": { + "Get": { + "methods": [ + "get" + ] + }, + "List": { + "methods": [ + "list" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/py.typed b/owl-bot-staging/v1/google/cloud/compute_v1/py.typed new file mode 100644 index 000000000..071da5269 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-compute package uses inline types. 
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/__init__.py new file mode 100644 index 000000000..4de65971c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/__init__.py new file mode 100644 index 000000000..d18479c79 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import AcceleratorTypesClient + +__all__ = ( + 'AcceleratorTypesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/client.py new file mode 100644 index 000000000..ddda69c90 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/client.py @@ -0,0 +1,613 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.accelerator_types import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import AcceleratorTypesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import AcceleratorTypesRestTransport + + +class AcceleratorTypesClientMeta(type): + """Metaclass for the AcceleratorTypes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[AcceleratorTypesTransport]] + _transport_registry["rest"] = AcceleratorTypesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[AcceleratorTypesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class AcceleratorTypesClient(metaclass=AcceleratorTypesClientMeta): + """Services + The AcceleratorTypes API. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AcceleratorTypesClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AcceleratorTypesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AcceleratorTypesTransport: + """Returns the transport used by the client instance. + + Returns: + AcceleratorTypesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, AcceleratorTypesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the accelerator types client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, AcceleratorTypesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AcceleratorTypesTransport): + # transport is a AcceleratorTypesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListAcceleratorTypesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of accelerator types. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListAcceleratorTypesRequest, dict]): + The request object. A request message for + AcceleratorTypes.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.accelerator_types.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListAcceleratorTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListAcceleratorTypesRequest): + request = compute.AggregatedListAcceleratorTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetAcceleratorTypeRequest, dict] = None, + *, + project: str = None, + zone: str = None, + accelerator_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.AcceleratorType: + r"""Returns the specified accelerator type. + + Args: + request (Union[google.cloud.compute_v1.types.GetAcceleratorTypeRequest, dict]): + The request object. A request message for + AcceleratorTypes.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + accelerator_type (str): + Name of the accelerator type to + return. + + This corresponds to the ``accelerator_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.AcceleratorType: + Represents an Accelerator Type + resource. Google Cloud Platform provides + graphics processing units (accelerators) + that you can add to VM instances to + improve or accelerate performance when + working with intensive workloads. For + more information, read GPUs on Compute + Engine. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, accelerator_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetAcceleratorTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetAcceleratorTypeRequest): + request = compute.GetAcceleratorTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if accelerator_type is not None: + request.accelerator_type = accelerator_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListAcceleratorTypesRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of accelerator types that are + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListAcceleratorTypesRequest, dict]): + The request object. A request message for + AcceleratorTypes.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.accelerator_types.pagers.ListPager: + Contains a list of accelerator types. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListAcceleratorTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListAcceleratorTypesRequest): + request = compute.ListAcceleratorTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "AcceleratorTypesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/pagers.py new file mode 100644 index 000000000..fa7635b61 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class AggregatedListPager:
    """A pager for iterating through ``aggregated_list`` requests.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.AcceleratorTypeAggregatedList`
    response and exposes an ``__iter__`` method over its ``items`` field.
    When further pages exist, iteration transparently issues additional
    ``AggregatedList`` requests and continues over the ``items`` of each
    subsequent response.

    All the usual :class:`google.cloud.compute_v1.types.AcceleratorTypeAggregatedList`
    attributes are available on the pager; only the most recently fetched
    response backs attribute lookup.
    """

    def __init__(self,
            method: Callable[..., compute.AcceleratorTypeAggregatedList],
            request: compute.AggregatedListAcceleratorTypesRequest,
            response: compute.AcceleratorTypeAggregatedList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.AggregatedListAcceleratorTypesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.AcceleratorTypeAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = compute.AggregatedListAcceleratorTypesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes fall through to the latest response page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.AcceleratorTypeAggregatedList]:
        """Lazily yield each response page, fetching the next as needed."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[Tuple[str, compute.AcceleratorTypesScopedList]]:
        for current_page in self.pages:
            for scoped_entry in current_page.items.items():
                yield scoped_entry

    def get(self, key: str) -> Optional[compute.AcceleratorTypesScopedList]:
        """Look up a scoped list by key on the current response page."""
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'


class ListPager:
    """A pager for iterating through ``list`` requests.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.AcceleratorTypeList` response and
    exposes an ``__iter__`` method over its ``items`` field. When further
    pages exist, iteration transparently issues additional ``List`` requests
    and continues over the ``items`` of each subsequent response.

    All the usual :class:`google.cloud.compute_v1.types.AcceleratorTypeList`
    attributes are available on the pager; only the most recently fetched
    response backs attribute lookup.
    """

    def __init__(self,
            method: Callable[..., compute.AcceleratorTypeList],
            request: compute.ListAcceleratorTypesRequest,
            response: compute.AcceleratorTypeList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListAcceleratorTypesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.AcceleratorTypeList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = compute.ListAcceleratorTypesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes fall through to the latest response page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.AcceleratorTypeList]:
        """Lazily yield each response page, fetching the next as needed."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[compute.AcceleratorType]:
        for current_page in self.pages:
            for accelerator in current_page.items:
                yield accelerator

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'

# ---- next file in diff: owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/transports/__init__.py ----
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import AcceleratorTypesTransport
from .rest import AcceleratorTypesRestTransport


# Compile a registry of transports.
_transport_registry = OrderedDict()  # type: Dict[str, Type[AcceleratorTypesTransport]]
_transport_registry['rest'] = AcceleratorTypesRestTransport

__all__ = (
    'AcceleratorTypesTransport',
    'AcceleratorTypesRestTransport',
)

# ---- next file in diff: owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/transports/base.py ----
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class AcceleratorTypesTransport(abc.ABC):
    """Abstract transport class for AcceleratorTypes.

    Concrete transports (e.g. the REST transport) implement the RPC
    properties declared here; this base class handles credential
    resolution and method wrapping shared by all of them.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute.readonly',
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname; default to port 443 (HTTPS) when none is given.
        if ':' not in host:
            host += ':443'
        self._host = host

        # Save the scopes (before credential resolution, which may consume
        # the defaulted variant via scopes_kwargs).
        self._scopes = scopes
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Resolve credentials: explicit file, explicit object, or ADC.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # For service-account credentials, prefer self-signed JWT when asked
        # for and supported by the installed google-auth version.
        use_self_signed_jwt = (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        )
        if use_self_signed_jwt:
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC gets retry/timeout/
        # user-agent handling attached once, up front.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (self.aggregated_list, self.get, self.list)
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListAcceleratorTypesRequest],
            Union[
                compute.AcceleratorTypeAggregatedList,
                Awaitable[compute.AcceleratorTypeAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetAcceleratorTypeRequest],
            Union[
                compute.AcceleratorType,
                Awaitable[compute.AcceleratorType]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListAcceleratorTypesRequest],
            Union[
                compute.AcceleratorTypeList,
                Awaitable[compute.AcceleratorTypeList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'AcceleratorTypesTransport',
)

# ---- next file in diff: owl-bot-staging/v1/google/cloud/compute_v1/services/accelerator_types/transports/rest.py ----
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import AcceleratorTypesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class AcceleratorTypesRestTransport(AcceleratorTypesTransport): + """REST backend transport for AcceleratorTypes. + + Services + The AcceleratorTypes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListAcceleratorTypesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.AcceleratorTypeAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListAcceleratorTypesRequest): + The request object. A request message for + AcceleratorTypes.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.AcceleratorTypeAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/acceleratorTypes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListAcceleratorTypesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListAcceleratorTypesRequest.to_json( + compute.AggregatedListAcceleratorTypesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.AcceleratorTypeAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetAcceleratorTypeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.AcceleratorType: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetAcceleratorTypeRequest): + The request object. A request message for + AcceleratorTypes.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.AcceleratorType: + Represents an Accelerator Type + resource. Google Cloud Platform provides + graphics processing units (accelerators) + that you can add to VM instances to + improve or accelerate performance when + working with intensive workloads. For + more information, read GPUs on Compute + Engine. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/acceleratorTypes/{accelerator_type}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "accelerator_type", + "acceleratorType" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetAcceleratorTypeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetAcceleratorTypeRequest.to_json( + compute.GetAcceleratorTypeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.AcceleratorType.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListAcceleratorTypesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.AcceleratorTypeList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListAcceleratorTypesRequest): + The request object. A request message for + AcceleratorTypes.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.AcceleratorTypeList: + Contains a list of accelerator types. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/acceleratorTypes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListAcceleratorTypesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListAcceleratorTypesRequest.to_json( + compute.ListAcceleratorTypesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.AcceleratorTypeList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListAcceleratorTypesRequest], + compute.AcceleratorTypeAggregatedList]: + return self._aggregated_list + @ property + def get(self) -> Callable[ + [compute.GetAcceleratorTypeRequest], + compute.AcceleratorType]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListAcceleratorTypesRequest], + compute.AcceleratorTypeList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'AcceleratorTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/__init__.py new file mode 100644 index 000000000..2267206dd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the 
License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import AddressesClient + +__all__ = ( + 'AddressesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/client.py new file mode 100644 index 000000000..a1fbb9dac --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/client.py @@ -0,0 +1,797 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.addresses import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import AddressesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import AddressesRestTransport + + +class AddressesClientMeta(type): + """Metaclass for the Addresses client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[AddressesTransport]] + _transport_registry["rest"] = AddressesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[AddressesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+        return next(iter(cls._transport_registry.values()))
+
+
+class AddressesClient(metaclass=AddressesClientMeta):
+    """The Addresses API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            AddressesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            AddressesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> AddressesTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            AddressesTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return
"projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, AddressesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the addresses client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, AddressesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AddressesTransport): + # transport is a AddressesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListAddressesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of addresses. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListAddressesRequest, dict]): + The request object. A request message for + Addresses.AggregatedList. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.addresses.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListAddressesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListAddressesRequest): + request = compute.AggregatedListAddressesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteAddressRequest, dict] = None, + *, + project: str = None, + region: str = None, + address: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified address resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteAddressRequest, dict]): + The request object. A request message for + Addresses.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + address (str): + Name of the address resource to + delete. + + This corresponds to the ``address`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, address]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteAddressRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteAddressRequest): + request = compute.DeleteAddressRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if address is not None: + request.address = address + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetAddressRequest, dict] = None, + *, + project: str = None, + region: str = None, + address: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Address: + r"""Returns the specified address resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetAddressRequest, dict]): + The request object. A request message for Addresses.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + address (str): + Name of the address resource to + return. + + This corresponds to the ``address`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Address: + Represents an IP Address resource. Google Compute Engine + has two IP Address resources: \* [Global (external and + internal)](\ https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses) + \* [Regional (external and + internal)](\ https://cloud.google.com/compute/docs/reference/rest/v1/addresses) + For more information, see Reserving a static external IP + address. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, address]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetAddressRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetAddressRequest): + request = compute.GetAddressRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if address is not None: + request.address = address + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
        return response

    def insert(self,
            request: Union[compute.InsertAddressRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            address_resource: compute.Address = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Creates an address resource in the specified project
        by using the data included in the request.

        Args:
            request (Union[google.cloud.compute_v1.types.InsertAddressRequest, dict]):
                The request object. A request message for
                Addresses.Insert. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            region (str):
                Name of the region for this request.
                This corresponds to the ``region`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            address_resource (google.cloud.compute_v1.types.Address):
                The body resource for this request
                This corresponds to the ``address_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        # NOTE(review): this uses truthiness, so an explicitly passed empty
        # string (e.g. project="") is treated as "not set" by this check.
        has_flattened_params = any([project, region, address_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.InsertAddressRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.InsertAddressRequest):
            request = compute.InsertAddressRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if project is not None:
            request.project = project
        if region is not None:
            request.region = region
        if address_resource is not None:
            request.address_resource = address_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.insert]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list(self,
            request: Union[compute.ListAddressesRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListPager:
        r"""Retrieves a list of addresses contained within the
        specified region.

        Args:
            request (Union[google.cloud.compute_v1.types.ListAddressesRequest, dict]):
                The request object. A request message for
                Addresses.List. See the method description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            region (str):
                Name of the region for this request.
                This corresponds to the ``region`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.services.addresses.pagers.ListPager:
                Contains a list of addresses.
                Iterating over this object will yield
                results and resolve additional pages
                automatically.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, region])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.ListAddressesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.ListAddressesRequest):
            request = compute.ListAddressesRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if project is not None:
            request.project = project
        if region is not None:
            request.region = region

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()



# NOTE(review): pkg_resources is deprecated in modern setuptools; newer
# generator versions use importlib.metadata instead. This file is
# machine-generated (owl-bot staging), so fix in the generator, not here.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-compute",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "AddressesClient",
)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/pagers.py
new file mode 100644
index 000000000..ce40cdee3
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/pagers.py
@@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class AggregatedListPager:
    """A pager for iterating through ``aggregated_list`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.AddressAggregatedList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``AggregatedList`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.compute_v1.types.AddressAggregatedList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., compute.AddressAggregatedList],
            request: compute.AggregatedListAddressesRequest,
            response: compute.AddressAggregatedList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.AggregatedListAddressesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.AddressAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutation does not affect the caller.
        self._request = compute.AggregatedListAddressesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.AddressAggregatedList]:
        # NOTE: advancing this generator mutates self._request/_response in
        # place; only the most recently fetched page is retained.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[Tuple[str, compute.AddressesScopedList]]:
        # Aggregated responses map a scope key to an AddressesScopedList;
        # iterate the (key, scoped_list) pairs across all pages.
        for page in self.pages:
            yield from page.items.items()

    def get(self, key: str) -> Optional[compute.AddressesScopedList]:
        # Looks up `key` in the current page only; does not fetch more pages.
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)


class ListPager:
    """A pager for iterating through ``list`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.AddressList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``List`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.compute_v1.types.AddressList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., compute.AddressList],
            request: compute.ListAddressesRequest,
            response: compute.AddressList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListAddressesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.AddressList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutation does not affect the caller.
        self._request = compute.ListAddressesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.AddressList]:
        # NOTE: advancing this generator mutates self._request/_response in
        # place; only the most recently fetched page is retained.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[compute.Address]:
        for page in self.pages:
            yield from page.items

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/__init__.py
new file mode 100644
index 000000000..4d7b18d11
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/__init__.py
@@ -0,0 +1,30 @@
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import AddressesTransport
from .rest import AddressesRestTransport


# Compile a registry of transports.
_transport_registry = OrderedDict()  # type: Dict[str, Type[AddressesTransport]]
_transport_registry['rest'] = AddressesRestTransport

__all__ = (
    'AddressesTransport',
    'AddressesRestTransport',
)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/base.py
new file mode 100644
index 000000000..62fc7ecc1
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/base.py
@@ -0,0 +1,203 @@
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# NOTE(review): pkg_resources is deprecated in modern setuptools; newer
# generator versions use importlib.metadata. Fix in the generator, not here.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class AddressesTransport(abc.ABC):
    """Abstract transport class for Addresses."""

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # Pass the configured scopes along with the service defaults so
        # google-auth can fall back when no explicit scopes were given.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr() guard keeps compatibility with older google-auth
        # releases that lack with_always_use_jwt_access.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # No default retry/timeout is configured for any method here; the
        # wrapper only attaches client_info (user-agent) metadata.
        self._wrapped_methods = {
            self.aggregated_list: gapic_v1.method.wrap_method(
                self.aggregated_list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

       .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListAddressesRequest],
            Union[
                compute.AddressAggregatedList,
                Awaitable[compute.AddressAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteAddressRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetAddressRequest],
            Union[
                compute.Address,
                Awaitable[compute.Address]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertAddressRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListAddressesRequest],
            Union[
                compute.AddressList,
                Awaitable[compute.AddressList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'AddressesTransport',
)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/rest.py
b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/rest.py new file mode 100644 index 000000000..bd51c7af0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/addresses/transports/rest.py @@ -0,0 +1,644 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import AddressesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class AddressesRestTransport(AddressesTransport): + """REST backend transport for Addresses. + + The Addresses API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListAddressesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.AddressAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListAddressesRequest): + The request object. A request message for + Addresses.AggregatedList. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.AddressAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/addresses', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListAddressesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListAddressesRequest.to_json( + compute.AggregatedListAddressesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.AddressAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteAddressRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteAddressRequest): + The request object. A request message for + Addresses.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/addresses/{address}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "address", + "address" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteAddressRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteAddressRequest.to_json( + compute.DeleteAddressRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetAddressRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Address: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetAddressRequest): + The request object. A request message for Addresses.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Address: + Represents an IP Address resource. Google Compute Engine + has two IP Address resources: \* `Global (external and + internal) `__ + \* `Regional (external and + internal) `__ + For more information, see Reserving a static external IP + address. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/addresses/{address}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "address", + "address" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetAddressRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetAddressRequest.to_json( + compute.GetAddressRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Address.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertAddressRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertAddressRequest): + The request object. A request message for + Addresses.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/addresses', + 'body': 'address_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertAddressRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Address.to_json( + compute.Address( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertAddressRequest.to_json( + compute.InsertAddressRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListAddressesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.AddressList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListAddressesRequest): + The request object. A request message for Addresses.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.AddressList: + Contains a list of addresses. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/addresses', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListAddressesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListAddressesRequest.to_json( + compute.ListAddressesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.AddressList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListAddressesRequest], + compute.AddressAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteAddressRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetAddressRequest], + compute.Address]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertAddressRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListAddressesRequest], + compute.AddressList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'AddressesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/__init__.py new file mode 100644 index 000000000..e3720b196 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import AutoscalersClient + +__all__ = ( + 'AutoscalersClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/client.py new file mode 100644 index 000000000..17626faff --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/client.py @@ -0,0 +1,995 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.autoscalers import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import AutoscalersTransport, DEFAULT_CLIENT_INFO +from .transports.rest import AutoscalersRestTransport + + +class AutoscalersClientMeta(type): + """Metaclass for the Autoscalers client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[AutoscalersTransport]] + _transport_registry["rest"] = AutoscalersRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[AutoscalersTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class AutoscalersClient(metaclass=AutoscalersClientMeta):
+    """The Autoscalers API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            AutoscalersClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + AutoscalersClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> AutoscalersTransport: + """Returns the transport used by the client instance. + + Returns: + AutoscalersTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, AutoscalersTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the autoscalers client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, AutoscalersTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, AutoscalersTransport): + # transport is a AutoscalersTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListAutoscalersRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of autoscalers. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListAutoscalersRequest, dict]): + The request object. A request message for + Autoscalers.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.autoscalers.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListAutoscalersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListAutoscalersRequest): + request = compute.AggregatedListAutoscalersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteAutoscalerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + autoscaler: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified autoscaler. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteAutoscalerRequest, dict]): + The request object. A request message for + Autoscalers.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler (str): + Name of the autoscaler to delete. + This corresponds to the ``autoscaler`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, autoscaler]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteAutoscalerRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteAutoscalerRequest): + request = compute.DeleteAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if autoscaler is not None: + request.autoscaler = autoscaler + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetAutoscalerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + autoscaler: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Autoscaler: + r"""Returns the specified autoscaler resource. Gets a + list of available autoscalers by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetAutoscalerRequest, dict]): + The request object. A request message for + Autoscalers.Get. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler (str): + Name of the autoscaler to return. + This corresponds to the ``autoscaler`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Autoscaler: + Represents an Autoscaler resource. Google Compute Engine + has two Autoscaler resources: \* + [Zonal](/compute/docs/reference/rest/v1/autoscalers) \* + [Regional](/compute/docs/reference/rest/v1/regionAutoscalers) + Use autoscalers to automatically add or delete instances + from a managed instance group according to your defined + autoscaling policy. For more information, read + Autoscaling Groups of Instances. For zonal managed + instance groups resource, use the autoscaler resource. + For regional managed instance groups, use the + regionAutoscalers resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, autoscaler]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetAutoscalerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetAutoscalerRequest): + request = compute.GetAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if autoscaler is not None: + request.autoscaler = autoscaler + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertAutoscalerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + autoscaler_resource: compute.Autoscaler = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates an autoscaler in the specified project using + the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertAutoscalerRequest, dict]): + The request object. A request message for + Autoscalers.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + This corresponds to the ``autoscaler_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, autoscaler_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertAutoscalerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertAutoscalerRequest): + request = compute.InsertAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if autoscaler_resource is not None: + request.autoscaler_resource = autoscaler_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListAutoscalersRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of autoscalers contained within the + specified zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListAutoscalersRequest, dict]): + The request object. A request message for + Autoscalers.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.autoscalers.pagers.ListPager: + Contains a list of Autoscaler + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+        has_flattened_params = any([project, zone])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a compute.ListAutoscalersRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, compute.ListAutoscalersRequest):
+            request = compute.ListAutoscalersRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project is not None:
+            request.project = project
+        if zone is not None:
+            request.zone = zone
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list]
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def patch(self,
+            request: Union[compute.PatchAutoscalerRequest, dict] = None,
+            *,
+            project: str = None,
+            zone: str = None,
+            autoscaler_resource: compute.Autoscaler = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> compute.Operation:
+        r"""Updates an autoscaler in the specified project using
+        the data included in the request. This method supports
+        PATCH semantics and uses the JSON merge patch format and
+        processing rules.
+
+        Args:
+            request (Union[google.cloud.compute_v1.types.PatchAutoscalerRequest, dict]):
+                The request object. A request message for
+                Autoscalers.Patch. See the method description for
+                details.
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + This corresponds to the ``autoscaler_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, autoscaler_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchAutoscalerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchAutoscalerRequest): + request = compute.PatchAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if autoscaler_resource is not None: + request.autoscaler_resource = autoscaler_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateAutoscalerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + autoscaler_resource: compute.Autoscaler = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates an autoscaler in the specified project using + the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateAutoscalerRequest, dict]): + The request object. A request message for + Autoscalers.Update. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + This corresponds to the ``autoscaler_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, autoscaler_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateAutoscalerRequest. 
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, compute.UpdateAutoscalerRequest):
+            request = compute.UpdateAutoscalerRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project is not None:
+            request.project = project
+        if zone is not None:
+            request.zone = zone
+        if autoscaler_resource is not None:
+            request.autoscaler_resource = autoscaler_resource
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.update]
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        """Releases underlying transport's resources.
+
+        .. warning::
+            ONLY use as a context manager if the transport is NOT shared
+            with other clients! Exiting the with block will CLOSE the transport
+            and may cause errors in other clients!
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "AutoscalersClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/pagers.py new file mode 100644 index 000000000..6c82f7f8a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.AutoscalerAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.AutoscalerAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.AutoscalerAggregatedList], + request: compute.AggregatedListAutoscalersRequest, + response: compute.AutoscalerAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListAutoscalersRequest): + The initial request object. + response (google.cloud.compute_v1.types.AutoscalerAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListAutoscalersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.AutoscalerAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.AutoscalersScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.AutoscalersScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.AutoscalerList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.AutoscalerList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.AutoscalerList], + request: compute.ListAutoscalersRequest, + response: compute.AutoscalerList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListAutoscalersRequest): + The initial request object. + response (google.cloud.compute_v1.types.AutoscalerList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListAutoscalersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.AutoscalerList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Autoscaler]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/__init__.py new file mode 100644 index 000000000..97e491125 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import AutoscalersTransport +from .rest import AutoscalersRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict()  # type: Dict[str, Type[AutoscalersTransport]]
+_transport_registry['rest'] = AutoscalersRestTransport
+
+__all__ = (
+    'AutoscalersTransport',
+    'AutoscalersRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/base.py
new file mode 100644
index 000000000..adde6b25d
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/base.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class AutoscalersTransport(abc.ABC): + """Abstract transport class for Autoscalers.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListAutoscalersRequest], + Union[ + compute.AutoscalerAggregatedList, + Awaitable[compute.AutoscalerAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteAutoscalerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetAutoscalerRequest], + Union[ + compute.Autoscaler, + Awaitable[compute.Autoscaler] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertAutoscalerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListAutoscalersRequest], + Union[ + compute.AutoscalerList, + Awaitable[compute.AutoscalerList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchAutoscalerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateAutoscalerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'AutoscalersTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/rest.py new file mode 100644 index 000000000..fe189f592 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/autoscalers/transports/rest.py @@ -0,0 +1,889 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials 
# type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import AutoscalersTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class AutoscalersRestTransport(AutoscalersTransport): + """REST backend transport for Autoscalers. + + The Autoscalers API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListAutoscalersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.AutoscalerAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListAutoscalersRequest): + The request object. A request message for + Autoscalers.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.AutoscalerAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/autoscalers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListAutoscalersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListAutoscalersRequest.to_json( + compute.AggregatedListAutoscalersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.AutoscalerAggregatedList.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _delete(self,
+            request: compute.DeleteAutoscalerRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the delete method over HTTP.
+
+        Args:
+            request (~.compute.DeleteAutoscalerRequest):
+                The request object. A request message for
+                Autoscalers.Delete. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                -  For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/autoscalers/{autoscaler}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "autoscaler", + "autoscaler" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteAutoscalerRequest.to_json( + compute.DeleteAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Autoscaler: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetAutoscalerRequest): + The request object. A request message for + Autoscalers.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Autoscaler: + Represents an Autoscaler resource. Google Compute Engine + has two Autoscaler resources: \* + `Zonal `__ + \* + `Regional `__ + Use autoscalers to automatically add or delete instances + from a managed instance group according to your defined + autoscaling policy. For more information, read + Autoscaling Groups of Instances. For zonal managed + instance groups resource, use the autoscaler resource. + For regional managed instance groups, use the + regionAutoscalers resource. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/autoscalers/{autoscaler}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "autoscaler", + "autoscaler" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetAutoscalerRequest.to_json( + compute.GetAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Autoscaler.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertAutoscalerRequest): + The request object. A request message for + Autoscalers.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/autoscalers', + 'body': 'autoscaler_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Autoscaler.to_json( + compute.Autoscaler( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertAutoscalerRequest.to_json( + compute.InsertAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListAutoscalersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.AutoscalerList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListAutoscalersRequest): + The request object. A request message for + Autoscalers.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.AutoscalerList: + Contains a list of Autoscaler + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/autoscalers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListAutoscalersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListAutoscalersRequest.to_json( + compute.ListAutoscalersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.AutoscalerList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchAutoscalerRequest): + The request object. A request message for + Autoscalers.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/autoscalers', + 'body': 'autoscaler_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.PatchAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Autoscaler.to_json( + compute.Autoscaler( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchAutoscalerRequest.to_json( + compute.PatchAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateAutoscalerRequest): + The request object. A request message for + Autoscalers.Update. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/autoscalers', + 'body': 'autoscaler_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.UpdateAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Autoscaler.to_json( + compute.Autoscaler( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateAutoscalerRequest.to_json( + compute.UpdateAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListAutoscalersRequest], + compute.AutoscalerAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteAutoscalerRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetAutoscalerRequest], + compute.Autoscaler]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertAutoscalerRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListAutoscalersRequest], + compute.AutoscalerList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchAutoscalerRequest], + compute.Operation]: + return self._patch + @ property + def update(self) -> Callable[ + [compute.UpdateAutoscalerRequest], + compute.Operation]: + return self._update + def close(self): + self._session.close() + + +__all__=( + 'AutoscalersRestTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/__init__.py new file mode 100644 index 000000000..2bd08b2bc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import BackendBucketsClient + +__all__ = ( + 'BackendBucketsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/client.py new file mode 100644 index 000000000..c5a19f5f0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/client.py @@ -0,0 +1,1092 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.backend_buckets import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import BackendBucketsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import BackendBucketsRestTransport + + +class BackendBucketsClientMeta(type): + """Metaclass for the BackendBuckets client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[BackendBucketsTransport]] + _transport_registry["rest"] = BackendBucketsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[BackendBucketsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class BackendBucketsClient(metaclass=BackendBucketsClientMeta): + """The BackendBuckets API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BackendBucketsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BackendBucketsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BackendBucketsTransport: + """Returns the transport used by the client instance. + + Returns: + BackendBucketsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified 
organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, BackendBucketsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the backend buckets client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BackendBucketsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BackendBucketsTransport): + # transport is a BackendBucketsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_signed_url_key(self, + request: Union[compute.AddSignedUrlKeyBackendBucketRequest, dict] = None, + *, + project: str = None, + backend_bucket: str = None, + signed_url_key_resource: compute.SignedUrlKey = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds a key for validating requests with signed URLs + for this backend bucket. + + Args: + request (Union[google.cloud.compute_v1.types.AddSignedUrlKeyBackendBucketRequest, dict]): + The request object. A request message for + BackendBuckets.AddSignedUrlKey. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket (str): + Name of the BackendBucket resource to + which the Signed URL Key should be + added. The name should conform to + RFC1035. + + This corresponds to the ``backend_bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + signed_url_key_resource (google.cloud.compute_v1.types.SignedUrlKey): + The body resource for this request + This corresponds to the ``signed_url_key_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_bucket, signed_url_key_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddSignedUrlKeyBackendBucketRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddSignedUrlKeyBackendBucketRequest): + request = compute.AddSignedUrlKeyBackendBucketRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if backend_bucket is not None: + request.backend_bucket = backend_bucket + if signed_url_key_resource is not None: + request.signed_url_key_resource = signed_url_key_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_signed_url_key] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteBackendBucketRequest, dict] = None, + *, + project: str = None, + backend_bucket: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified BackendBucket resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteBackendBucketRequest, dict]): + The request object. A request message for + BackendBuckets.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket (str): + Name of the BackendBucket resource to + delete. + + This corresponds to the ``backend_bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_bucket]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteBackendBucketRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteBackendBucketRequest): + request = compute.DeleteBackendBucketRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_bucket is not None: + request.backend_bucket = backend_bucket + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_signed_url_key(self, + request: Union[compute.DeleteSignedUrlKeyBackendBucketRequest, dict] = None, + *, + project: str = None, + backend_bucket: str = None, + key_name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes a key for validating requests with signed + URLs for this backend bucket. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteSignedUrlKeyBackendBucketRequest, dict]): + The request object. A request message for + BackendBuckets.DeleteSignedUrlKey. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket (str): + Name of the BackendBucket resource to + which the Signed URL Key should be + added. The name should conform to + RFC1035. + + This corresponds to the ``backend_bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + key_name (str): + The name of the Signed URL Key to + delete. + + This corresponds to the ``key_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_bucket, key_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteSignedUrlKeyBackendBucketRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteSignedUrlKeyBackendBucketRequest): + request = compute.DeleteSignedUrlKeyBackendBucketRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_bucket is not None: + request.backend_bucket = backend_bucket + if key_name is not None: + request.key_name = key_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_signed_url_key] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetBackendBucketRequest, dict] = None, + *, + project: str = None, + backend_bucket: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.BackendBucket: + r"""Returns the specified BackendBucket resource. Gets a + list of available backend buckets by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetBackendBucketRequest, dict]): + The request object. A request message for + BackendBuckets.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket (str): + Name of the BackendBucket resource to + return. + + This corresponds to the ``backend_bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.BackendBucket: + Represents a Cloud Storage Bucket + resource. This Cloud Storage bucket + resource is referenced by a URL map of a + load balancer. For more information, + read Backend Buckets. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, backend_bucket]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetBackendBucketRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetBackendBucketRequest): + request = compute.GetBackendBucketRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_bucket is not None: + request.backend_bucket = backend_bucket + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertBackendBucketRequest, dict] = None, + *, + project: str = None, + backend_bucket_resource: compute.BackendBucket = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a BackendBucket resource in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertBackendBucketRequest, dict]): + The request object. A request message for + BackendBuckets.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ backend_bucket_resource (google.cloud.compute_v1.types.BackendBucket): + The body resource for this request + This corresponds to the ``backend_bucket_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_bucket_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertBackendBucketRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertBackendBucketRequest): + request = compute.InsertBackendBucketRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_bucket_resource is not None: + request.backend_bucket_resource = backend_bucket_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListBackendBucketsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of BackendBucket resources + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListBackendBucketsRequest, dict]): + The request object. A request message for + BackendBuckets.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.backend_buckets.pagers.ListPager: + Contains a list of BackendBucket + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListBackendBucketsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListBackendBucketsRequest): + request = compute.ListBackendBucketsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchBackendBucketRequest, dict] = None, + *, + project: str = None, + backend_bucket: str = None, + backend_bucket_resource: compute.BackendBucket = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified BackendBucket resource with the + data included in the request. This method supports PATCH + semantics and uses the JSON merge patch format and + processing rules. 
+ + Args: + request (Union[google.cloud.compute_v1.types.PatchBackendBucketRequest, dict]): + The request object. A request message for + BackendBuckets.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket (str): + Name of the BackendBucket resource to + patch. + + This corresponds to the ``backend_bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket_resource (google.cloud.compute_v1.types.BackendBucket): + The body resource for this request + This corresponds to the ``backend_bucket_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_bucket, backend_bucket_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchBackendBucketRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchBackendBucketRequest): + request = compute.PatchBackendBucketRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_bucket is not None: + request.backend_bucket = backend_bucket + if backend_bucket_resource is not None: + request.backend_bucket_resource = backend_bucket_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateBackendBucketRequest, dict] = None, + *, + project: str = None, + backend_bucket: str = None, + backend_bucket_resource: compute.BackendBucket = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified BackendBucket resource with the + data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateBackendBucketRequest, dict]): + The request object. A request message for + BackendBuckets.Update. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket (str): + Name of the BackendBucket resource to + update. + + This corresponds to the ``backend_bucket`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_bucket_resource (google.cloud.compute_v1.types.BackendBucket): + The body resource for this request + This corresponds to the ``backend_bucket_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, backend_bucket, backend_bucket_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateBackendBucketRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateBackendBucketRequest): + request = compute.UpdateBackendBucketRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_bucket is not None: + request.backend_bucket = backend_bucket + if backend_bucket_resource is not None: + request.backend_bucket_resource = backend_bucket_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "BackendBucketsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/pagers.py new file mode 100644 index 000000000..f8418dc9b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.BackendBucketList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.BackendBucketList` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.BackendBucketList], + request: compute.ListBackendBucketsRequest, + response: compute.BackendBucketList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListBackendBucketsRequest): + The initial request object. + response (google.cloud.compute_v1.types.BackendBucketList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListBackendBucketsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.BackendBucketList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.BackendBucket]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/transports/__init__.py new file mode 100644 index 000000000..5dc3b41b8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may 
not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import BackendBucketsTransport +from .rest import BackendBucketsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[BackendBucketsTransport]] +_transport_registry['rest'] = BackendBucketsRestTransport + +__all__ = ( + 'BackendBucketsTransport', + 'BackendBucketsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/transports/base.py new file mode 100644 index 000000000..c5af7c318 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/transports/base.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Advertise the installed library version in the user-agent string.  When the
# distribution is not installed (e.g. running generated code straight from
# source), fall back to a blank ClientInfo instead of crashing at import time.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class BackendBucketsTransport(abc.ABC):
    """Abstract transport class for BackendBuckets.

    Concrete transports (currently only REST) subclass this and implement
    each RPC as a callable exposed through the abstract properties below.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. If none are
                given, credentials are ascertained from the environment.
            credentials_file (Optional[str]): A file loadable with
                :func:`google.auth.load_credentials_from_file`. Mutually
                exclusive with ``credentials``.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string along with API
                requests. ``None`` means default info; generally only set
                when developing your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Default to port 443 (HTTPS) when the host carries no explicit port.
        self._host = host if ':' in host else host + ':443'

        # Remember the caller-requested scopes; AUTH_SCOPES are the defaults.
        self._scopes = scopes
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Explicit credentials and a credentials file cannot both be given.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        # Resolve credentials: file > explicit object > environment default.
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # Service account credentials get self-signed JWT when requested and
        # the installed google-auth supports it.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials, "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute the gapic-wrapped form of every RPC callable."""
        rpcs = (
            self.add_signed_url_key,
            self.delete,
            self.delete_signed_url_key,
            self.get,
            self.insert,
            self.list,
            self.patch,
            self.update,
        )
        # Each RPC gets the same treatment: no default timeout, shared
        # client_info for the user-agent header.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in rpcs
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def add_signed_url_key(self) -> Callable[
            [compute.AddSignedUrlKeyBackendBucketRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteBackendBucketRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def delete_signed_url_key(self) -> Callable[
            [compute.DeleteSignedUrlKeyBackendBucketRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetBackendBucketRequest],
            Union[compute.BackendBucket, Awaitable[compute.BackendBucket]]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertBackendBucketRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListBackendBucketsRequest],
            Union[compute.BackendBucketList, Awaitable[compute.BackendBucketList]]]:
        raise NotImplementedError()

    @property
    def patch(self) -> Callable[
            [compute.PatchBackendBucketRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def update(self) -> Callable[
            [compute.UpdateBackendBucketRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()


__all__ = (
    'BackendBucketsTransport',
)

# ---------------------------------------------------------------------------
# owl-bot-staging/v1/google/cloud/compute_v1/services/backend_buckets/transports/rest.py
# ---------------------------------------------------------------------------
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

# Older api-core releases do not expose _MethodDefault; fall back to a
# plain object sentinel for the retry type alias.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from google.cloud.compute_v1.types import compute

from .base import BackendBucketsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class BackendBucketsRestTransport(BackendBucketsTransport):
    """REST backend transport for BackendBuckets.

    The BackendBuckets API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials=None,
            credentials_file: str=None,
            scopes: Sequence[str]=None,
            client_cert_source_for_mtls: Callable[[
                ], Tuple[bytes, bytes]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. If none are
                given, credentials are ascertained from the environment.
            credentials_file (Optional[str]): A file loadable with
                :func:`google.auth.load_credentials_from_file`. Ignored if
                ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. Ignored if
                ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate to configure a mutual TLS HTTP channel.
                Ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string along with API
                requests. ``None`` means default info; generally only set
                when developing your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but "http" can be specified for testing or local
                servers.
        """
        # Run the base constructor.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also*
        # be set on the credentials object.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # One authorized HTTP session is shared by every RPC on this
        # transport; configure mTLS on it when a cert source is given.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_api(self, request, *,
            request_cls,
            response_cls,
            http_options,
            required_fields,
            body_cls=None,
            timeout=None,
            metadata=(),
            ):
        """Transcode *request* to HTTP, send it, and decode the response.

        Shared driver for every RPC on this transport.  Each caller supplies
        the proto-plus request/response classes, the HTTP rule used for
        transcoding, the (snake_case, camelCase) required query fields, and
        optionally the message class of the HTTP body.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: the subclass
                matching the HTTP status when the server returns >= 400.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded = path_template.transcode(http_options, **request_kwargs)

        # Jsonify the request body, when this RPC carries one.
        body = None
        if body_cls is not None:
            body = body_cls.to_json(
                body_cls(transcoded['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False
            )

        # Jsonify the query params.
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params: a required
        # field holding its default value can get lost in the to_json call
        # above, so restore it from the transcoded request.
        original_params = transcoded['query_params']
        for snake_name, camel_name in required_fields:
            if snake_name in original_params and camel_name not in query_params:
                query_params[camel_name] = original_params[snake_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        send_kwargs = dict(
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )
        if body_cls is not None:
            send_kwargs['data'] = body
        response = getattr(self._session, transcoded['method'])(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=transcoded['uri']),
            **send_kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _add_signed_url_key(self,
            request: compute.AddSignedUrlKeyBackendBucketRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the add signed url key method over HTTP.

        Args:
            request (~.compute.AddSignedUrlKeyBackendBucketRequest):
                The request object. A request message for
                BackendBuckets.AddSignedUrlKey. See the method description
                for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_api(
            request,
            request_cls=compute.AddSignedUrlKeyBackendBucketRequest,
            response_cls=compute.Operation,
            http_options=[
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}/addSignedUrlKey',
                    'body': 'signed_url_key_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("backend_bucket", "backendBucket"),
                ("project", "project"),
            ],
            body_cls=compute.SignedUrlKey,
            timeout=timeout,
            metadata=metadata,
        )

    def _delete(self,
            request: compute.DeleteBackendBucketRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteBackendBucketRequest):
                The request object. A request message for
                BackendBuckets.Delete. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_api(
            request,
            request_cls=compute.DeleteBackendBucketRequest,
            response_cls=compute.Operation,
            http_options=[
                {
                    'method': 'delete',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("backend_bucket", "backendBucket"),
                ("project", "project"),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _delete_signed_url_key(self,
            request: compute.DeleteSignedUrlKeyBackendBucketRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete signed url key method over HTTP.

        Args:
            request (~.compute.DeleteSignedUrlKeyBackendBucketRequest):
                The request object. A request message for
                BackendBuckets.DeleteSignedUrlKey. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_api(
            request,
            request_cls=compute.DeleteSignedUrlKeyBackendBucketRequest,
            response_cls=compute.Operation,
            http_options=[
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}/deleteSignedUrlKey',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("backend_bucket", "backendBucket"),
                ("key_name", "keyName"),
                ("project", "project"),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _get(self,
            request: compute.GetBackendBucketRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.BackendBucket:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetBackendBucketRequest):
                The request object. A request message for
                BackendBuckets.Get. See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.BackendBucket:
                Represents a Cloud Storage Bucket resource, referenced by a
                URL map of a load balancer. For more information, read
                Backend Buckets.
        """
        return self._call_api(
            request,
            request_cls=compute.GetBackendBucketRequest,
            response_cls=compute.BackendBucket,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("backend_bucket", "backendBucket"),
                ("project", "project"),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _insert(self,
            request: compute.InsertBackendBucketRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertBackendBucketRequest):
                The request object. A request message for
                BackendBuckets.Insert. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_api(
            request,
            request_cls=compute.InsertBackendBucketRequest,
            response_cls=compute.Operation,
            http_options=[
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets',
                    'body': 'backend_bucket_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("project", "project"),
            ],
            body_cls=compute.BackendBucket,
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListBackendBucketsRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.BackendBucketList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListBackendBucketsRequest):
                The request object. A request message for
                BackendBuckets.List. See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.BackendBucketList:
                Contains a list of BackendBucket resources.
        """
        return self._call_api(
            request,
            request_cls=compute.ListBackendBucketsRequest,
            response_cls=compute.BackendBucketList,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("project", "project"),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _patch(self,
            request: compute.PatchBackendBucketRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the patch method over HTTP.

        Args:
            request (~.compute.PatchBackendBucketRequest):
                The request object. A request message for
                BackendBuckets.Patch. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_api(
            request,
            request_cls=compute.PatchBackendBucketRequest,
            response_cls=compute.Operation,
            http_options=[
                {
                    'method': 'patch',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}',
                    'body': 'backend_bucket_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("backend_bucket", "backendBucket"),
                ("project", "project"),
            ],
            body_cls=compute.BackendBucket,
            timeout=timeout,
            metadata=metadata,
        )

    def _update(self,
            request: compute.UpdateBackendBucketRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the update method over HTTP.

        Args:
            request (~.compute.UpdateBackendBucketRequest):
                The request object. A request message for
                BackendBuckets.Update. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_api(
            request,
            request_cls=compute.UpdateBackendBucketRequest,
            response_cls=compute.Operation,
            http_options=[
                {
                    'method': 'put',
                    'uri': '/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}',
                    'body': 'backend_bucket_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("backend_bucket", "backendBucket"),
                ("project", "project"),
            ],
            body_cls=compute.BackendBucket,
            timeout=timeout,
            metadata=metadata,
        )

    @property
    def add_signed_url_key(self) -> Callable[
            [compute.AddSignedUrlKeyBackendBucketRequest],
            compute.Operation]:
        return self._add_signed_url_key

    @property
    def delete(self) -> Callable[
            [compute.DeleteBackendBucketRequest],
            compute.Operation]:
        return self._delete

    @property
    def delete_signed_url_key(self) -> Callable[
            [compute.DeleteSignedUrlKeyBackendBucketRequest],
            compute.Operation]:
        return self._delete_signed_url_key

    @property
    def get(self) -> Callable[
            [compute.GetBackendBucketRequest],
            compute.BackendBucket]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertBackendBucketRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListBackendBucketsRequest],
            compute.BackendBucketList]:
        return self._list

    @property
    def patch(self) -> Callable[
            [compute.PatchBackendBucketRequest],
            compute.Operation]:
        return self._patch

    @property
    def update(self) -> Callable[
            [compute.UpdateBackendBucketRequest],
            compute.Operation]:
        return self._update

    def close(self):
        self._session.close()


__all__ = (
    'BackendBucketsRestTransport',
)

# ---------------------------------------------------------------------------
# owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/__init__.py
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import BackendServicesClient + +__all__ = ( + 'BackendServicesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/client.py new file mode 100644 index 000000000..488a88774 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/client.py @@ -0,0 +1,1369 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.backend_services import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import BackendServicesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import BackendServicesRestTransport + + +class BackendServicesClientMeta(type): + """Metaclass for the BackendServices client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[BackendServicesTransport]] + _transport_registry["rest"] = BackendServicesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[BackendServicesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class BackendServicesClient(metaclass=BackendServicesClientMeta): + """The BackendServices API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BackendServicesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BackendServicesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BackendServicesTransport: + """Returns the transport used by the client instance. + + Returns: + BackendServicesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified 
organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, BackendServicesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the backend services client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BackendServicesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BackendServicesTransport): + # transport is a BackendServicesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_signed_url_key(self, + request: Union[compute.AddSignedUrlKeyBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + signed_url_key_resource: compute.SignedUrlKey = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds a key for validating requests with signed URLs + for this backend service. + + Args: + request (Union[google.cloud.compute_v1.types.AddSignedUrlKeyBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.AddSignedUrlKey. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to which the Signed URL Key should be + added. The name should conform to + RFC1035. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + signed_url_key_resource (google.cloud.compute_v1.types.SignedUrlKey): + The body resource for this request + This corresponds to the ``signed_url_key_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service, signed_url_key_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddSignedUrlKeyBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddSignedUrlKeyBackendServiceRequest): + request = compute.AddSignedUrlKeyBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + if signed_url_key_resource is not None: + request.signed_url_key_resource = signed_url_key_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_signed_url_key] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def aggregated_list(self, + request: Union[compute.AggregatedListBackendServicesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of all BackendService resources, + regional and global, available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListBackendServicesRequest, dict]): + The request object. A request message for + BackendServices.AggregatedList. See the method + description for details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.backend_services.pagers.AggregatedListPager: + Contains a list of + BackendServicesScopedList. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListBackendServicesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListBackendServicesRequest): + request = compute.AggregatedListBackendServicesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified BackendService resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.Delete. See the method description for + details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to delete. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteBackendServiceRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteBackendServiceRequest): + request = compute.DeleteBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_signed_url_key(self, + request: Union[compute.DeleteSignedUrlKeyBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + key_name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes a key for validating requests with signed + URLs for this backend service. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteSignedUrlKeyBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.DeleteSignedUrlKey. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to which the Signed URL Key should be + added. The name should conform to + RFC1035. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + key_name (str): + The name of the Signed URL Key to + delete. 
+ + This corresponds to the ``key_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service, key_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteSignedUrlKeyBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteSignedUrlKeyBackendServiceRequest): + request = compute.DeleteSignedUrlKeyBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + if key_name is not None: + request.key_name = key_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_signed_url_key] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.BackendService: + r"""Returns the specified BackendService resource. Gets a + list of available backend services. + + Args: + request (Union[google.cloud.compute_v1.types.GetBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to return. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.BackendService: + Represents a Backend Service resource. A backend service + defines how Google Cloud load balancers distribute + traffic. The backend service configuration contains a + set of values, such as the protocol used to connect to + backends, various distribution and session settings, + health checks, and timeouts. These settings provide + fine-grained control over how your load balancer + behaves. Most of the settings have default values that + allow for easy configuration if you need to get started + quickly. Backend services in Google Compute Engine can + be either regionally or globally scoped. \* + [Global](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) + \* + [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices) + For more information, see Backend Services. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetBackendServiceRequest): + request = compute.GetBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_health(self, + request: Union[compute.GetHealthBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + resource_group_reference_resource: compute.ResourceGroupReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.BackendServiceGroupHealth: + r"""Gets the most recent health check results for this + BackendService. Example request body: { "group": + "/zones/us-east1-b/instanceGroups/lb-backend-example" } + + Args: + request (Union[google.cloud.compute_v1.types.GetHealthBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.GetHealth. See the method description + for details. + project (str): + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to which the queried instance belongs. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_group_reference_resource (google.cloud.compute_v1.types.ResourceGroupReference): + The body resource for this request + This corresponds to the ``resource_group_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.BackendServiceGroupHealth: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service, resource_group_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetHealthBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetHealthBackendServiceRequest): + request = compute.GetHealthBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + if resource_group_reference_resource is not None: + request.resource_group_reference_resource = resource_group_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_health] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service_resource: compute.BackendService = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a BackendService resource in the specified + project using the data included in the request. For more + information, see Backend services overview . + + Args: + request (Union[google.cloud.compute_v1.types.InsertBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + This corresponds to the ``backend_service_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertBackendServiceRequest): + request = compute.InsertBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_service_resource is not None: + request.backend_service_resource = backend_service_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListBackendServicesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of BackendService resources + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListBackendServicesRequest, dict]): + The request object. 
A request message for + BackendServices.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.backend_services.pagers.ListPager: + Contains a list of BackendService + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListBackendServicesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListBackendServicesRequest): + request = compute.ListBackendServicesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + backend_service_resource: compute.BackendService = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified BackendService resource with + the data included in the request. For more information, + see Backend services overview. This method supports + PATCH semantics and uses the JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to patch. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + This corresponds to the ``backend_service_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service, backend_service_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchBackendServiceRequest): + request = compute.PatchBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + if backend_service_resource is not None: + request.backend_service_resource = backend_service_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_security_policy(self, + request: Union[compute.SetSecurityPolicyBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + security_policy_reference_resource: compute.SecurityPolicyReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the Google Cloud Armor security policy for the + specified backend service. For more information, see + Google Cloud Armor Overview + + Args: + request (Union[google.cloud.compute_v1.types.SetSecurityPolicyBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.SetSecurityPolicy. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to which the security policy should be + set. The name should conform to RFC1035. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ security_policy_reference_resource (google.cloud.compute_v1.types.SecurityPolicyReference): + The body resource for this request + This corresponds to the ``security_policy_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service, security_policy_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetSecurityPolicyBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetSecurityPolicyBackendServiceRequest): + request = compute.SetSecurityPolicyBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + if security_policy_reference_resource is not None: + request.security_policy_reference_resource = security_policy_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_security_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateBackendServiceRequest, dict] = None, + *, + project: str = None, + backend_service: str = None, + backend_service_resource: compute.BackendService = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified BackendService resource with + the data included in the request. For more information, + see Backend services overview. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateBackendServiceRequest, dict]): + The request object. A request message for + BackendServices.Update. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to update. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + This corresponds to the ``backend_service_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, backend_service, backend_service_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.UpdateBackendServiceRequest): + request = compute.UpdateBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if backend_service is not None: + request.backend_service = backend_service + if backend_service_resource is not None: + request.backend_service_resource = backend_service_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "BackendServicesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/pagers.py new file mode 100644 index 000000000..a57fac782 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.BackendServiceAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.BackendServiceAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.BackendServiceAggregatedList], + request: compute.AggregatedListBackendServicesRequest, + response: compute.BackendServiceAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListBackendServicesRequest): + The initial request object. + response (google.cloud.compute_v1.types.BackendServiceAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.AggregatedListBackendServicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.BackendServiceAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.BackendServicesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.BackendServicesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.BackendServiceList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.BackendServiceList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.BackendServiceList], + request: compute.ListBackendServicesRequest, + response: compute.BackendServiceList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.compute_v1.types.ListBackendServicesRequest): + The initial request object. + response (google.cloud.compute_v1.types.BackendServiceList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListBackendServicesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.BackendServiceList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.BackendService]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/__init__.py new file mode 100644 index 000000000..f8810f77d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import BackendServicesTransport +from .rest import BackendServicesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[BackendServicesTransport]] +_transport_registry['rest'] = BackendServicesRestTransport + +__all__ = ( + 'BackendServicesTransport', + 'BackendServicesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/base.py new file mode 100644 index 000000000..6b9c6e165 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/base.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class BackendServicesTransport(abc.ABC): + """Abstract transport class for BackendServices.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.add_signed_url_key: gapic_v1.method.wrap_method( + self.add_signed_url_key, + default_timeout=None, + client_info=client_info, + ), + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.delete_signed_url_key: gapic_v1.method.wrap_method( + self.delete_signed_url_key, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_health: gapic_v1.method.wrap_method( + self.get_health, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.set_security_policy: gapic_v1.method.wrap_method( + self.set_security_policy, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def add_signed_url_key(self) -> Callable[ + [compute.AddSignedUrlKeyBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListBackendServicesRequest], + Union[ + compute.BackendServiceAggregatedList, + Awaitable[compute.BackendServiceAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_signed_url_key(self) -> Callable[ + [compute.DeleteSignedUrlKeyBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetBackendServiceRequest], + Union[ + compute.BackendService, + Awaitable[compute.BackendService] + ]]: + raise NotImplementedError() + + @property + def get_health(self) -> Callable[ + [compute.GetHealthBackendServiceRequest], + Union[ + compute.BackendServiceGroupHealth, + Awaitable[compute.BackendServiceGroupHealth] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListBackendServicesRequest], + Union[ + compute.BackendServiceList, + Awaitable[compute.BackendServiceList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_security_policy(self) -> Callable[ + [compute.SetSecurityPolicyBackendServiceRequest], + Union[ + compute.Operation, + 
Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'BackendServicesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/rest.py new file mode 100644 index 000000000..bf8d91705 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/backend_services/transports/rest.py @@ -0,0 +1,1330 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and
# limitations under the License.
#


from google.cloud.compute_v1.types import compute

from .base import BackendServicesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


# Advertise the REST stack (requests) in the user agent; this transport has
# no gRPC layer, hence grpc_version=None.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)

class BackendServicesRestTransport(BackendServicesTransport):
    """REST backend transport for BackendServices.

    The BackendServices API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            client_cert_source_for_mtls: Optional[Callable[[
                ], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument
                is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate to configure mutual TLS HTTP channel. It is
                ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers, "http" can be
                specified.  NOTE(review): currently unused — request URLs
                are hard-coded to https below; kept for forward
                compatibility.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # All RPCs below go through this authorized requests session.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_rest(self, request_type, request, http_options, required_fields,
                   response_type, body_type=None, timeout=None, metadata=()):
        """Issue one REST call and decode the response.

        Shared plumbing for every RPC of this transport (the per-method
        bodies were previously eleven near-identical copies of this code).

        Args:
            request_type: proto-plus class of ``request``; used for
                dict/JSON (de)serialization.
            request: the request message instance.
            http_options: candidate HTTP rules handed to
                :func:`path_template.transcode`.
            required_fields: ``(snake_case, camelCase)`` pairs whose values
                must survive into the query string even when they hold the
                proto default value (``to_json`` drops defaults).
            response_type: proto-plus class used to decode the response.
            body_type: proto-plus class of the request body, or ``None``
                for body-less verbs.
            timeout (Optional[float]): per-request timeout, forwarded to
                the underlying ``requests`` call.
            metadata (Sequence[Tuple[str, str]]): extra request headers.

        Returns:
            The decoded ``response_type`` message.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: for any HTTP
                status >= 400.
        """
        request_kwargs = request_type.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        send_kwargs = {}
        if body_type is not None:
            # Jsonify the request body.
            send_kwargs['data'] = body_type.to_json(
                body_type(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(request_type.to_json(
            request_type(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if (snake_case_name in orig_query_params
                    and camel_case_name not in query_params):
                query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # TODO: replace with proper schema configuration (http/https)
            # logic honouring ``url_scheme``.
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            **send_kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the decoded response.
        return response_type.from_json(
            response.content, ignore_unknown_fields=True)

    def _add_signed_url_key(self,
            request: compute.AddSignedUrlKeyBackendServiceRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the add signed url key method over HTTP.

        Args:
            request (~.compute.AddSignedUrlKeyBackendServiceRequest):
                A request message for BackendServices.AddSignedUrlKey.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: The Operation tracking the key addition.
        """
        return self._call_rest(
            compute.AddSignedUrlKeyBackendServiceRequest, request,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}/addSignedUrlKey',
                'body': 'signed_url_key_resource',
            }],
            required_fields=[
                ("backend_service", "backendService"),
                ("project", "project"),
            ],
            response_type=compute.Operation,
            body_type=compute.SignedUrlKey,
            timeout=timeout,
            metadata=metadata,
        )

    def _aggregated_list(self,
            request: compute.AggregatedListBackendServicesRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.BackendServiceAggregatedList:
        r"""Call the aggregated list method over HTTP.

        Args:
            request (~.compute.AggregatedListBackendServicesRequest):
                A request message for BackendServices.AggregatedList.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.BackendServiceAggregatedList: Contains a list of
                BackendServicesScopedList.
        """
        return self._call_rest(
            compute.AggregatedListBackendServicesRequest, request,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/aggregated/backendServices',
            }],
            required_fields=[
                ("project", "project"),
            ],
            response_type=compute.BackendServiceAggregatedList,
            timeout=timeout,
            metadata=metadata,
        )

    def _delete(self,
            request: compute.DeleteBackendServiceRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteBackendServiceRequest):
                A request message for BackendServices.Delete.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: The Operation tracking the deletion.
        """
        return self._call_rest(
            compute.DeleteBackendServiceRequest, request,
            http_options=[{
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}',
            }],
            required_fields=[
                ("backend_service", "backendService"),
                ("project", "project"),
            ],
            response_type=compute.Operation,
            timeout=timeout,
            metadata=metadata,
        )

    def _delete_signed_url_key(self,
            request: compute.DeleteSignedUrlKeyBackendServiceRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete signed url key method over HTTP.

        Args:
            request (~.compute.DeleteSignedUrlKeyBackendServiceRequest):
                A request message for BackendServices.DeleteSignedUrlKey.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: The Operation tracking the key deletion.
        """
        return self._call_rest(
            compute.DeleteSignedUrlKeyBackendServiceRequest, request,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}/deleteSignedUrlKey',
            }],
            required_fields=[
                ("backend_service", "backendService"),
                ("key_name", "keyName"),
                ("project", "project"),
            ],
            response_type=compute.Operation,
            timeout=timeout,
            metadata=metadata,
        )

    def _get(self,
            request: compute.GetBackendServiceRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.BackendService:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetBackendServiceRequest):
                A request message for BackendServices.Get.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.BackendService: The requested Backend Service
                resource.
        """
        return self._call_rest(
            compute.GetBackendServiceRequest, request,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}',
            }],
            required_fields=[
                ("backend_service", "backendService"),
                ("project", "project"),
            ],
            response_type=compute.BackendService,
            timeout=timeout,
            metadata=metadata,
        )

    def _get_health(self,
            request: compute.GetHealthBackendServiceRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.BackendServiceGroupHealth:
        r"""Call the get health method over HTTP.

        Args:
            request (~.compute.GetHealthBackendServiceRequest):
                A request message for BackendServices.GetHealth.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.BackendServiceGroupHealth: Health state of the
                backend service's backends.
        """
        return self._call_rest(
            compute.GetHealthBackendServiceRequest, request,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}/getHealth',
                'body': 'resource_group_reference_resource',
            }],
            required_fields=[
                ("backend_service", "backendService"),
                ("project", "project"),
            ],
            response_type=compute.BackendServiceGroupHealth,
            body_type=compute.ResourceGroupReference,
            timeout=timeout,
            metadata=metadata,
        )

    def _insert(self,
            request: compute.InsertBackendServiceRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertBackendServiceRequest):
                A request message for BackendServices.Insert.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: The Operation tracking the insertion.
        """
        return self._call_rest(
            compute.InsertBackendServiceRequest, request,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/backendServices',
                'body': 'backend_service_resource',
            }],
            required_fields=[
                ("project", "project"),
            ],
            response_type=compute.Operation,
            body_type=compute.BackendService,
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListBackendServicesRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.BackendServiceList:
        r"""Call the list method over HTTP.

        NOTE(review): kept in the original inline style (not routed through
        ``_call_rest``) because the tail of this method lies outside this
        change's view; the body below must compose with it unchanged.

        Args:
            request (~.compute.ListBackendServicesRequest):
                A request message for BackendServices.List.
            retry (google.api_core.retry.Retry): Accepted for interface
                compatibility; not applied by this transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.BackendServiceList: Contains a list of BackendService
                resources.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/backendServices',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "project",
                "project"
            ),
        ]

        request_kwargs = compute.ListBackendServicesRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.ListBackendServicesRequest.to_json(
            compute.ListBackendServicesRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.BackendServiceList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchBackendServiceRequest): + The request object. A request message for + BackendServices.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}', + 'body': 'backend_service_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.PatchBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.BackendService.to_json( + compute.BackendService( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchBackendServiceRequest.to_json( + compute.PatchBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_security_policy(self, + request: compute.SetSecurityPolicyBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set security policy method over HTTP. + + Args: + request (~.compute.SetSecurityPolicyBackendServiceRequest): + The request object. A request message for + BackendServices.SetSecurityPolicy. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}/setSecurityPolicy', + 'body': 'security_policy_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.SetSecurityPolicyBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SecurityPolicyReference.to_json( + compute.SecurityPolicyReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetSecurityPolicyBackendServiceRequest.to_json( + compute.SetSecurityPolicyBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateBackendServiceRequest): + The request object. A request message for + BackendServices.Update. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/global/backendServices/{backend_service}', + 'body': 'backend_service_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.UpdateBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.BackendService.to_json( + compute.BackendService( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateBackendServiceRequest.to_json( + compute.UpdateBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_signed_url_key(self) -> Callable[ + [compute.AddSignedUrlKeyBackendServiceRequest], + compute.Operation]: + return self._add_signed_url_key + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListBackendServicesRequest], + compute.BackendServiceAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteBackendServiceRequest], + compute.Operation]: + return self._delete + @ property + def delete_signed_url_key(self) -> Callable[ + [compute.DeleteSignedUrlKeyBackendServiceRequest], + compute.Operation]: + return self._delete_signed_url_key + @ property + def get(self) -> Callable[ + [compute.GetBackendServiceRequest], + compute.BackendService]: + return self._get + @ property + def get_health(self) -> Callable[ + [compute.GetHealthBackendServiceRequest], + compute.BackendServiceGroupHealth]: + return self._get_health + @ property + def insert(self) -> Callable[ + [compute.InsertBackendServiceRequest], + compute.Operation]: + return 
self._insert + @ property + def list(self) -> Callable[ + [compute.ListBackendServicesRequest], + compute.BackendServiceList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchBackendServiceRequest], + compute.Operation]: + return self._patch + @ property + def set_security_policy(self) -> Callable[ + [compute.SetSecurityPolicyBackendServiceRequest], + compute.Operation]: + return self._set_security_policy + @ property + def update(self) -> Callable[ + [compute.UpdateBackendServiceRequest], + compute.Operation]: + return self._update + def close(self): + self._session.close() + + +__all__=( + 'BackendServicesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/__init__.py new file mode 100644 index 000000000..0f07d766c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import DiskTypesClient + +__all__ = ( + 'DiskTypesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/client.py new file mode 100644 index 000000000..c768f0115 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/client.py @@ -0,0 +1,611 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.disk_types import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import DiskTypesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import DiskTypesRestTransport + + +class DiskTypesClientMeta(type): + """Metaclass for the DiskTypes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[DiskTypesTransport]] + _transport_registry["rest"] = DiskTypesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[DiskTypesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class DiskTypesClient(metaclass=DiskTypesClientMeta): + """The DiskTypes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DiskTypesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DiskTypesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DiskTypesTransport: + """Returns the transport used by the client instance. + + Returns: + DiskTypesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + 
return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, DiskTypesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the disk types client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, DiskTypesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DiskTypesTransport): + # transport is a DiskTypesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListDiskTypesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of disk types. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListDiskTypesRequest, dict]): + The request object. A request message for + DiskTypes.AggregatedList. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.disk_types.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListDiskTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListDiskTypesRequest): + request = compute.AggregatedListDiskTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetDiskTypeRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.DiskType: + r"""Returns the specified disk type. Gets a list of + available disk types by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetDiskTypeRequest, dict]): + The request object. A request message for DiskTypes.Get. + See the method description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk_type (str): + Name of the disk type to return. + This corresponds to the ``disk_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.DiskType: + Represents a Disk Type resource. Google Compute Engine + has two Disk Type resources: \* + [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) + \* [Zonal](/compute/docs/reference/rest/v1/diskTypes) + You can choose from a variety of disk types based on + your needs. For more information, read Storage options. + The diskTypes resource represents disk types for a zonal + persistent disk. For more information, read Zonal + persistent disks. The regionDiskTypes resource + represents disk types for a regional persistent disk. + For more information, read Regional persistent disks. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetDiskTypeRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetDiskTypeRequest): + request = compute.GetDiskTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk_type is not None: + request.disk_type = disk_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListDiskTypesRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of disk types available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListDiskTypesRequest, dict]): + The request object. A request message for + DiskTypes.List. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.disk_types.pagers.ListPager: + Contains a list of disk types. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListDiskTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListDiskTypesRequest): + request = compute.ListDiskTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "DiskTypesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/pagers.py new file mode 100644 index 000000000..f29c9a64c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.DiskTypeAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.DiskTypeAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.DiskTypeAggregatedList], + request: compute.AggregatedListDiskTypesRequest, + response: compute.DiskTypeAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListDiskTypesRequest): + The initial request object. + response (google.cloud.compute_v1.types.DiskTypeAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListDiskTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.DiskTypeAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.DiskTypesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.DiskTypesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.DiskTypeList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.DiskTypeList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.DiskTypeList], + request: compute.ListDiskTypesRequest, + response: compute.DiskTypeList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListDiskTypesRequest): + The initial request object. + response (google.cloud.compute_v1.types.DiskTypeList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListDiskTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.DiskTypeList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.DiskType]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/__init__.py new file mode 100644 index 000000000..c8201c7b7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import DiskTypesTransport +from .rest import DiskTypesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[DiskTypesTransport]] +_transport_registry['rest'] = DiskTypesRestTransport + +__all__ = ( + 'DiskTypesTransport', + 'DiskTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/base.py new file mode 100644 index 000000000..02b9f3428 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/base.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Report the installed package version in the user-agent; fall back to an
# unversioned ClientInfo when running from source without an installed dist.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class DiskTypesTransport(abc.ABC):
    """Abstract transport class for DiskTypes.

    Concrete subclasses (see ``rest.py``) implement the actual wire protocol;
    this base class owns credential resolution and method wrapping.
    """

    # OAuth scopes requested when credentials are resolved from the
    # environment (``default_scopes`` for google-auth).
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute.readonly',
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        # NOTE(review): the bare ':' test would mis-handle a literal IPv6
        # address host — confirm callers only pass DNS names, as generated.
        if ':' not in host:
            host += ':443'
        self._host = host

        # ``scopes`` narrows the caller's request; AUTH_SCOPES is the default
        # set google-auth falls back to when the credentials support it.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # (hasattr guards against older google-auth versions without the API.)
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        self._wrapped_methods = {
            self.aggregated_list: gapic_v1.method.wrap_method(
                self.aggregated_list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # The three RPC properties below are the abstract surface a concrete
    # transport must provide; each returns the callable for that RPC.
    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListDiskTypesRequest],
            Union[
                compute.DiskTypeAggregatedList,
                Awaitable[compute.DiskTypeAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetDiskTypeRequest],
            Union[
                compute.DiskType,
                Awaitable[compute.DiskType]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListDiskTypesRequest],
            Union[
                compute.DiskTypeList,
                Awaitable[compute.DiskTypeList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'DiskTypesTransport',
)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/rest.py
new file mode 100644
index 000000000..2cf17014a
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disk_types/transports/rest.py
@@ -0,0 +1,417 @@
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core
import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import DiskTypesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class DiskTypesRestTransport(DiskTypesTransport): + """REST backend transport for DiskTypes. + + The DiskTypes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListDiskTypesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DiskTypeAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListDiskTypesRequest): + The request object. A request message for + DiskTypes.AggregatedList. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.DiskTypeAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/diskTypes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListDiskTypesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListDiskTypesRequest.to_json( + compute.AggregatedListDiskTypesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DiskTypeAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetDiskTypeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DiskType: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetDiskTypeRequest): + The request object. A request message for DiskTypes.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DiskType: + Represents a Disk Type resource. Google Compute Engine + has two Disk Type resources: \* + `Regional `__ + \* `Zonal `__ + You can choose from a variety of disk types based on + your needs. For more information, read Storage options. + The diskTypes resource represents disk types for a zonal + persistent disk. For more information, read Zonal + persistent disks. The regionDiskTypes resource + represents disk types for a regional persistent disk. + For more information, read Regional persistent disks. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/diskTypes/{disk_type}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk_type", + "diskType" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetDiskTypeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetDiskTypeRequest.to_json( + compute.GetDiskTypeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DiskType.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListDiskTypesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DiskTypeList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListDiskTypesRequest): + The request object. A request message for DiskTypes.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DiskTypeList: + Contains a list of disk types. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/diskTypes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListDiskTypesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListDiskTypesRequest.to_json( + compute.ListDiskTypesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DiskTypeList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListDiskTypesRequest], + compute.DiskTypeAggregatedList]: + return self._aggregated_list + @ property + def get(self) -> Callable[ + [compute.GetDiskTypeRequest], + compute.DiskType]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListDiskTypesRequest], + compute.DiskTypeList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'DiskTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disks/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disks/__init__.py new file mode 100644 index 000000000..bb0139e6c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disks/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import DisksClient + +__all__ = ( + 'DisksClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/disks/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/disks/client.py new file mode 100644 index 000000000..9b2adfe1a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/disks/client.py @@ -0,0 +1,1698 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.disks import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import DisksTransport, DEFAULT_CLIENT_INFO +from .transports.rest import DisksRestTransport + + +class DisksClientMeta(type): + """Metaclass for the Disks client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[DisksTransport]] + _transport_registry["rest"] = DisksRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[DisksTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class DisksClient(metaclass=DisksClientMeta): + """The Disks API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + DisksClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + DisksClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> DisksTransport: + """Returns the transport used by the client instance. + + Returns: + DisksTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return 
"projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, DisksTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the disks client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, DisksTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, DisksTransport): + # transport is a DisksTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_resource_policies(self, + request: Union[compute.AddResourcePoliciesDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk: str = None, + disks_add_resource_policies_request_resource: compute.DisksAddResourcePoliciesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds existing resource policies to a disk. You can + only add one policy which will be applied to this disk + for scheduling snapshot creation. + + Args: + request (Union[google.cloud.compute_v1.types.AddResourcePoliciesDiskRequest, dict]): + The request object. A request message for + Disks.AddResourcePolicies. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + The disk name for this request. + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ disks_add_resource_policies_request_resource (google.cloud.compute_v1.types.DisksAddResourcePoliciesRequest): + The body resource for this request + This corresponds to the ``disks_add_resource_policies_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk, disks_add_resource_policies_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddResourcePoliciesDiskRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddResourcePoliciesDiskRequest): + request = compute.AddResourcePoliciesDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk is not None: + request.disk = disk + if disks_add_resource_policies_request_resource is not None: + request.disks_add_resource_policies_request_resource = disks_add_resource_policies_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_resource_policies] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def aggregated_list(self, + request: Union[compute.AggregatedListDisksRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of persistent disks. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListDisksRequest, dict]): + The request object. A request message for + Disks.AggregatedList. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.disks.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListDisksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListDisksRequest): + request = compute.AggregatedListDisksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def create_snapshot(self, + request: Union[compute.CreateSnapshotDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk: str = None, + snapshot_resource: compute.Snapshot = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a snapshot of a specified persistent disk. + + Args: + request (Union[google.cloud.compute_v1.types.CreateSnapshotDiskRequest, dict]): + The request object. A request message for + Disks.CreateSnapshot. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + Name of the persistent disk to + snapshot. + + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + snapshot_resource (google.cloud.compute_v1.types.Snapshot): + The body resource for this request + This corresponds to the ``snapshot_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk, snapshot_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.CreateSnapshotDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.CreateSnapshotDiskRequest): + request = compute.CreateSnapshotDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk is not None: + request.disk = disk + if snapshot_resource is not None: + request.snapshot_resource = snapshot_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_snapshot] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified persistent disk. Deleting a + disk removes its data permanently and is irreversible. + However, deleting a disk does not delete any snapshots + previously made from the disk. You must separately + delete snapshots. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteDiskRequest, dict]): + The request object. A request message for Disks.Delete. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + Name of the persistent disk to + delete. + + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteDiskRequest): + request = compute.DeleteDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk is not None: + request.disk = disk + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Disk: + r"""Returns a specified persistent disk. Gets a list of + available persistent disks by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetDiskRequest, dict]): + The request object. A request message for Disks.Get. See + the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + Name of the persistent disk to + return. + + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Disk: + Represents a Persistent Disk resource. Google Compute + Engine has two Disk resources: \* + [Zonal](/compute/docs/reference/rest/v1/disks) \* + [Regional](/compute/docs/reference/rest/v1/regionDisks) + Persistent disks are required for running your VM + instances. Create both boot and non-boot (data) + persistent disks. For more information, read Persistent + Disks. For more storage options, read Storage options. + The disks resource represents a zonal persistent disk. + For more information, read Zonal persistent disks. 
The + regionDisks resource represents a regional persistent + disk. For more information, read Regional resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetDiskRequest): + request = compute.GetDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk is not None: + request.disk = disk + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyDiskRequest, dict]): + The request object. A request message for + Disks.GetIamPolicy. See the method description for + details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicyDiskRequest): + request = compute.GetIamPolicyDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk_resource: compute.Disk = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a persistent disk in the specified project + using the data in the request. You can create a disk + from a source (sourceImage, sourceSnapshot, or + sourceDisk) or create an empty 500 GB data disk by + omitting all properties. You can also create a disk that + is larger than the default size by specifying the sizeGb + property. + + Args: + request (Union[google.cloud.compute_v1.types.InsertDiskRequest, dict]): + The request object. A request message for Disks.Insert. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ disk_resource (google.cloud.compute_v1.types.Disk): + The body resource for this request + This corresponds to the ``disk_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertDiskRequest): + request = compute.InsertDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk_resource is not None: + request.disk_resource = disk_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListDisksRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of persistent disks contained within + the specified zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListDisksRequest, dict]): + The request object. A request message for Disks.List. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.disks.pagers.ListPager: + A list of Disk resources. 
+ Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListDisksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListDisksRequest): + request = compute.ListDisksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def remove_resource_policies(self, + request: Union[compute.RemoveResourcePoliciesDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk: str = None, + disks_remove_resource_policies_request_resource: compute.DisksRemoveResourcePoliciesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Removes resource policies from a disk. + + Args: + request (Union[google.cloud.compute_v1.types.RemoveResourcePoliciesDiskRequest, dict]): + The request object. A request message for + Disks.RemoveResourcePolicies. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + The disk name for this request. + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disks_remove_resource_policies_request_resource (google.cloud.compute_v1.types.DisksRemoveResourcePoliciesRequest): + The body resource for this request + This corresponds to the ``disks_remove_resource_policies_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk, disks_remove_resource_policies_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemoveResourcePoliciesDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemoveResourcePoliciesDiskRequest): + request = compute.RemoveResourcePoliciesDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk is not None: + request.disk = disk + if disks_remove_resource_policies_request_resource is not None: + request.disks_remove_resource_policies_request_resource = disks_remove_resource_policies_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.remove_resource_policies] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def resize(self, + request: Union[compute.ResizeDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + disk: str = None, + disks_resize_request_resource: compute.DisksResizeRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Resizes the specified persistent disk. You can only + increase the size of the disk. + + Args: + request (Union[google.cloud.compute_v1.types.ResizeDiskRequest, dict]): + The request object. A request message for Disks.Resize. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + The name of the persistent disk. + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disks_resize_request_resource (google.cloud.compute_v1.types.DisksResizeRequest): + The body resource for this request + This corresponds to the ``disks_resize_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, disk, disks_resize_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ResizeDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ResizeDiskRequest): + request = compute.ResizeDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if disk is not None: + request.disk = disk + if disks_resize_request_resource is not None: + request.disks_resize_request_resource = disks_resize_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.resize] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + zone_set_policy_request_resource: compute.ZoneSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyDiskRequest, dict]): + The request object. A request message for + Disks.SetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + This corresponds to the ``zone_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 
3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, zone_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetIamPolicyDiskRequest): + request = compute.SetIamPolicyDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if zone_set_policy_request_resource is not None: + request.zone_set_policy_request_resource = zone_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_labels(self, + request: Union[compute.SetLabelsDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + zone_set_labels_request_resource: compute.ZoneSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on a disk. To learn more about + labels, read the Labeling Resources documentation. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsDiskRequest, dict]): + The request object. A request message for + Disks.SetLabels. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone_set_labels_request_resource (google.cloud.compute_v1.types.ZoneSetLabelsRequest): + The body resource for this request + This corresponds to the ``zone_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, zone_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetLabelsDiskRequest): + request = compute.SetLabelsDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if zone_set_labels_request_resource is not None: + request.zone_set_labels_request_resource = zone_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsDiskRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsDiskRequest, dict]): + The request object. A request message for + Disks.TestIamPermissions. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsDiskRequest): + request = compute.TestIamPermissionsDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
class AggregatedListPager:
    """Iterate over ``aggregated_list`` responses page by page.

    Wraps an initial :class:`google.cloud.compute_v1.types.DiskAggregatedList`
    and transparently fetches follow-up pages: iterating the pager walks the
    ``items`` map of every page, issuing additional ``AggregatedList``
    requests whenever a ``next_page_token`` is present.

    Attribute access falls through to the most recent response, so all the
    usual ``DiskAggregatedList`` fields remain available on the pager itself.
    Only the latest response is retained.
    """
    def __init__(self,
            method: Callable[..., compute.DiskAggregatedList],
            request: compute.AggregatedListDisksRequest,
            response: compute.DiskAggregatedList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which is reused here to fetch subsequent pages.
            request (google.cloud.compute_v1.types.AggregatedListDisksRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.DiskAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so mutating ``page_token`` while paging does not
        # alter the caller's object.
        self._request = compute.AggregatedListDisksRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.DiskAggregatedList]:
        """Yield each response page, fetching more while a token remains."""
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                break
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)

    def __iter__(self) -> Iterator[Tuple[str, compute.DisksScopedList]]:
        # Flatten the (scope-name, scoped-list) entries of every page.
        return (entry for page in self.pages for entry in page.items.items())

    def get(self, key: str) -> Optional[compute.DisksScopedList]:
        """Return the scoped list for ``key`` from the current response."""
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListPager:
    """Iterate over ``list`` responses page by page.

    Wraps an initial :class:`google.cloud.compute_v1.types.DiskList` and
    transparently fetches follow-up pages: iterating the pager walks the
    ``items`` field of every page, issuing additional ``List`` requests
    whenever a ``next_page_token`` is present.

    Attribute access falls through to the most recent response, so all the
    usual ``DiskList`` fields remain available on the pager itself. Only the
    latest response is retained.
    """
    def __init__(self,
            method: Callable[..., compute.DiskList],
            request: compute.ListDisksRequest,
            response: compute.DiskList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which is reused here to fetch subsequent pages.
            request (google.cloud.compute_v1.types.ListDisksRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.DiskList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so mutating ``page_token`` while paging does not
        # alter the caller's object.
        self._request = compute.ListDisksRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.DiskList]:
        """Yield each response page, fetching more while a token remains."""
        current = self._response
        yield current
        while current.next_page_token:
            self._request.page_token = current.next_page_token
            current = self._method(self._request, metadata=self._metadata)
            self._response = current
            yield current

    def __iter__(self) -> Iterator[compute.Disk]:
        for page in self.pages:
            for disk in page.items:
                yield disk

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
# Registry mapping transport scheme names to their classes. Only the REST
# transport is generated for this service, so 'rest' is the sole key.
_transport_registry = OrderedDict()  # type: Dict[str, Type[DisksTransport]]
_transport_registry['rest'] = DisksRestTransport

# Public surface of the ``transports`` subpackage.
__all__ = (
    'DisksTransport',
    'DisksRestTransport',
)
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

try:
    # Advertise the installed package's version in the user-agent string.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package not pip-installed (e.g. generated code run from source).
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class DisksTransport(abc.ABC):
    """Abstract transport class for Disks.

    Resolves credentials and precomputes the retry/timeout-wrapped RPC
    table shared by all concrete transport implementations; each RPC is
    exposed as an abstract property returning a callable.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to; ``:443`` is
                appended when no explicit port is given.
            credentials (Optional[google.auth.credentials.Credentials]):
                Credentials to attach to requests; when neither this nor
                ``credentials_file`` is supplied, credentials are resolved
                from the environment.
            credentials_file (Optional[str]): A file loadable with
                :func:`google.auth.load_credentials_from_file`; mutually
                exclusive with ``credentials``.
            scopes (Optional[Sequence[str]]): A list of scopes;
                ``AUTH_SCOPES`` serves as the default set.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string with requests.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs
                should be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are given.
        """
        # Default to port 443 (HTTPS) when the host omits an explicit port.
        if ':' not in host:
            host += ':443'
        self._host = host

        scope_options = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes as supplied by the caller.
        self._scopes = scopes

        # Resolve credentials: explicit object > credentials file > ADC.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scope_options,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scope_options,
                quota_project_id=quota_project_id
            )

        # Upgrade service-account credentials to self-signed JWT when asked
        # and the installed google-auth version supports it.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials, "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the resolved credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute the retry/timeout-wrapped form of every RPC."""
        rpcs = (
            self.add_resource_policies,
            self.aggregated_list,
            self.create_snapshot,
            self.delete,
            self.get,
            self.get_iam_policy,
            self.insert,
            self.list,
            self.remove_resource_policies,
            self.resize,
            self.set_iam_policy,
            self.set_labels,
            self.test_iam_permissions,
        )
        # Every RPC uses identical wrapping, so build the table in one pass.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in rpcs
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def add_resource_policies(self) -> Callable[
            [compute.AddResourcePoliciesDiskRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListDisksRequest],
            Union[compute.DiskAggregatedList, Awaitable[compute.DiskAggregatedList]]]:
        raise NotImplementedError()

    @property
    def create_snapshot(self) -> Callable[
            [compute.CreateSnapshotDiskRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteDiskRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetDiskRequest],
            Union[compute.Disk, Awaitable[compute.Disk]]]:
        raise NotImplementedError()

    @property
    def get_iam_policy(self) -> Callable[
            [compute.GetIamPolicyDiskRequest],
            Union[compute.Policy, Awaitable[compute.Policy]]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertDiskRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListDisksRequest],
            Union[compute.DiskList, Awaitable[compute.DiskList]]]:
        raise NotImplementedError()

    @property
    def remove_resource_policies(self) -> Callable[
            [compute.RemoveResourcePoliciesDiskRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def resize(self) -> Callable[
            [compute.ResizeDiskRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def set_iam_policy(self) -> Callable[
            [compute.SetIamPolicyDiskRequest],
            Union[compute.Policy, Awaitable[compute.Policy]]]:
        raise NotImplementedError()

    @property
    def set_labels(self) -> Callable[
            [compute.SetLabelsDiskRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(self) -> Callable[
            [compute.TestIamPermissionsDiskRequest],
            Union[compute.TestPermissionsResponse, Awaitable[compute.TestPermissionsResponse]]]:
        raise NotImplementedError()


__all__ = (
    'DisksTransport',
)
class DisksRestTransport(DisksTransport):
    """REST backend transport for Disks.

    The Disks API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials]=None,
            credentials_file: Optional[str]=None,
            scopes: Optional[Sequence[str]]=None,
            client_cert_source_for_mtls: Optional[Callable[[
                ], Tuple[bytes, bytes]]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                NOTE(review): not forwarded to the base constructor below —
                confirm whether that is intentional.
            scopes (Optional(Sequence[str])): A list of scopes.
                NOTE(review): also not forwarded to the base constructor —
                confirm.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers,
                "http" can be specified.
                NOTE(review): currently unused — the per-RPC helpers
                hard-code "https" when building the request URL; confirm.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # HTTP session used for every request; it signs requests with the
        # credentials resolved by the base constructor.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            # Configure mutual TLS with the caller-supplied cert callback.
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)
    def _add_resource_policies(self,
            request: compute.AddResourcePoliciesDiskRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the add resource policies method over HTTP.

        Args:
            request (~.compute.AddResourcePoliciesDiskRequest):
                The request object. A request message for
                Disks.AddResourcePolicies. See the method description
                for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
                NOTE(review): ``retry`` is accepted for signature parity but
                not referenced in this body; retries appear to be applied by
                the wrapped-method layer — confirm.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata (forwarded as HTTP
                headers).

        Returns:
            ~.compute.Operation:
                Represents a Google Compute Engine Operation resource
                (global, regional, or zonal), used to manage asynchronous
                API requests.
        """

        # HTTP rule for this RPC: URI template placeholders are filled from
        # the request during transcoding; the named request field becomes
        # the JSON body.
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies',
                'body': 'disks_add_resource_policies_request_resource',
            },
        ]

        # (snake_case_name, camel_case_name) pairs of required fields whose
        # values must survive into the query string (see below).
        required_fields = [
            ("disk", "disk"),
            ("project", "project"),
            ("zone", "zone"),
        ]

        request_kwargs = compute.AddResourcePoliciesDiskRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body.
        body = compute.DisksAddResourcePoliciesRequest.to_json(
            compute.DisksAddResourcePoliciesRequest(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.AddResourcePoliciesDiskRequest.to_json(
            compute.AddResourcePoliciesDiskRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are omitted).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # TODO: replace with proper scheme (http/https) configuration;
            # the scheme is currently hard-coded regardless of ``url_scheme``.
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )
    def _aggregated_list(self,
            request: compute.AggregatedListDisksRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.DiskAggregatedList:
        r"""Call the aggregated list method over HTTP.

        Args:
            request (~.compute.AggregatedListDisksRequest):
                The request object. A request message for
                Disks.AggregatedList. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
                NOTE(review): not referenced in this body; retries appear to
                be applied by the wrapped-method layer — confirm.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata (forwarded as HTTP
                headers).

        Returns:
            ~.compute.DiskAggregatedList: The decoded response payload.
        """

        # HTTP rule for this RPC: a GET with no request body.
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/aggregated/disks',
            },
        ]

        # (snake_case_name, camel_case_name) pairs of required fields whose
        # values must survive into the query string (see below).
        required_fields = [
            ("project", "project"),
        ]

        request_kwargs = compute.AggregatedListDisksRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.AggregatedListDisksRequest.to_json(
            compute.AggregatedListDisksRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are omitted).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # TODO: replace with proper scheme (http/https) configuration.
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return compute.DiskAggregatedList.from_json(
            response.content,
            ignore_unknown_fields=True
        )
    def _create_snapshot(self,
            request: compute.CreateSnapshotDiskRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the create snapshot method over HTTP.

        Args:
            request (~.compute.CreateSnapshotDiskRequest):
                The request object. A request message for
                Disks.CreateSnapshot. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
                NOTE(review): not referenced in this body; retries appear to
                be applied by the wrapped-method layer — confirm.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata (forwarded as HTTP
                headers).

        Returns:
            ~.compute.Operation:
                Represents a Google Compute Engine Operation resource
                (global, regional, or zonal), used to manage asynchronous
                API requests.
        """

        # HTTP rule for this RPC: URI template placeholders are filled from
        # the request during transcoding; the ``snapshot_resource`` field
        # becomes the JSON body.
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/createSnapshot',
                'body': 'snapshot_resource',
            },
        ]

        # (snake_case_name, camel_case_name) pairs of required fields whose
        # values must survive into the query string (see below).
        required_fields = [
            ("disk", "disk"),
            ("project", "project"),
            ("zone", "zone"),
        ]

        request_kwargs = compute.CreateSnapshotDiskRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body.
        body = compute.Snapshot.to_json(
            compute.Snapshot(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.CreateSnapshotDiskRequest.to_json(
            compute.CreateSnapshotDiskRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are omitted).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # TODO: replace with proper scheme (http/https) configuration.
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )
    def _delete(self,
            request: compute.DeleteDiskRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteDiskRequest):
                The request object. A request message for Disks.Delete.
                See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
                NOTE(review): not referenced in this body; retries appear to
                be applied by the wrapped-method layer — confirm.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata (forwarded as HTTP
                headers).

        Returns:
            ~.compute.Operation:
                Represents a Google Compute Engine Operation resource
                (global, regional, or zonal), used to manage asynchronous
                API requests.
        """

        # HTTP rule for this RPC: a DELETE with no request body.
        http_options = [
            {
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{disk}',
            },
        ]

        # (snake_case_name, camel_case_name) pairs of required fields whose
        # values must survive into the query string (see below).
        required_fields = [
            ("disk", "disk"),
            ("project", "project"),
            ("zone", "zone"),
        ]

        request_kwargs = compute.DeleteDiskRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.DeleteDiskRequest.to_json(
            compute.DeleteDiskRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are omitted).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # TODO: replace with proper scheme (http/https) configuration.
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )
+
+        """
+
+        # HTTP binding for Disks.Get: a plain GET with project/zone/disk
+        # expanded into the URI path (no request body).
+        http_options = [
+            {
+                'method': 'get',
+                'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{disk}',
+            },
+        ]
+
+        required_fields = [
+            # (snake_case_name, camel_case_name)
+            (
+                "disk",
+                "disk"
+            ),
+            (
+                "project",
+                "project"
+            ),
+            (
+                "zone",
+                "zone"
+            ),
+        ]
+
+        # Transcode the proto request into URI / method / query-param pieces
+        # according to the http_options above.
+        request_kwargs = compute.GetDiskRequest.to_dict(request)
+        transcoded_request = path_template.transcode(
+            http_options, **request_kwargs)
+
+        uri = transcoded_request['uri']
+        method = transcoded_request['method']
+
+        # Jsonify the query params
+        query_params = json.loads(compute.GetDiskRequest.to_json(
+            compute.GetDiskRequest(transcoded_request['query_params']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        ))
+
+        # Ensure required fields have values in query_params.
+        # If a required field has a default value, it can get lost
+        # by the to_json call above.
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        # NOTE(review): metadata tuples are forwarded verbatim as HTTP
+        # headers — assumes keys are valid header names; verify at callers.
+        headers = dict(metadata)
+        headers['Content-Type'] = 'application/json'
+        # Dispatch on the transcoded HTTP verb via the underlying session.
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+        )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Disk.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyDiskRequest): + The request object. A request message for + Disks.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetIamPolicyDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyDiskRequest.to_json( + compute.GetIamPolicyDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertDiskRequest): + The request object. A request message for Disks.Insert. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/disks', + 'body': 'disk_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Disk.to_json( + compute.Disk( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertDiskRequest.to_json( + compute.InsertDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListDisksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DiskList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListDisksRequest): + The request object. A request message for Disks.List. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DiskList: + A list of Disk resources. 
+        """
+
+        # HTTP binding for Disks.List: GET on the zone-scoped disks
+        # collection; paging/filter options travel as query params.
+        http_options = [
+            {
+                'method': 'get',
+                'uri': '/compute/v1/projects/{project}/zones/{zone}/disks',
+            },
+        ]
+
+        required_fields = [
+            # (snake_case_name, camel_case_name)
+            (
+                "project",
+                "project"
+            ),
+            (
+                "zone",
+                "zone"
+            ),
+        ]
+
+        # Transcode the proto request into URI / method / query-param pieces.
+        request_kwargs = compute.ListDisksRequest.to_dict(request)
+        transcoded_request = path_template.transcode(
+            http_options, **request_kwargs)
+
+        uri = transcoded_request['uri']
+        method = transcoded_request['method']
+
+        # Jsonify the query params
+        query_params = json.loads(compute.ListDisksRequest.to_json(
+            compute.ListDisksRequest(transcoded_request['query_params']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        ))
+
+        # Ensure required fields have values in query_params.
+        # If a required field has a default value, it can get lost
+        # by the to_json call above.
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        headers = dict(metadata)
+        headers['Content-Type'] = 'application/json'
+        # Dispatch on the transcoded HTTP verb via the underlying session.
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+        )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DiskList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_resource_policies(self, + request: compute.RemoveResourcePoliciesDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove resource policies method over HTTP. + + Args: + request (~.compute.RemoveResourcePoliciesDiskRequest): + The request object. A request message for + Disks.RemoveResourcePolicies. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies', + 'body': 'disks_remove_resource_policies_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk", + "disk" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.RemoveResourcePoliciesDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.DisksRemoveResourcePoliciesRequest.to_json( + compute.DisksRemoveResourcePoliciesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveResourcePoliciesDiskRequest.to_json( + compute.RemoveResourcePoliciesDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _resize(self, + request: compute.ResizeDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the resize method over HTTP. + + Args: + request (~.compute.ResizeDiskRequest): + The request object. A request message for Disks.Resize. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+            - For global operations, use the ``globalOperations``
+            resource. - For regional operations, use the
+            ``regionOperations`` resource. - For zonal operations,
+            use the ``zonalOperations`` resource. For more
+            information, read Global, Regional, and Zonal Resources.
+
+        """
+
+        # HTTP binding for Disks.Resize: POST to the disk's resize action
+        # with the DisksResizeRequest message serialized as the JSON body.
+        http_options = [
+            {
+                'method': 'post',
+                'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/resize',
+                'body': 'disks_resize_request_resource',
+            },
+        ]
+
+        required_fields = [
+            # (snake_case_name, camel_case_name)
+            (
+                "disk",
+                "disk"
+            ),
+            (
+                "project",
+                "project"
+            ),
+            (
+                "zone",
+                "zone"
+            ),
+        ]
+
+        # Transcode the proto request into URI / method / body / query params.
+        request_kwargs = compute.ResizeDiskRequest.to_dict(request)
+        transcoded_request = path_template.transcode(
+            http_options, **request_kwargs)
+
+        # Jsonify the request body
+        body = compute.DisksResizeRequest.to_json(
+            compute.DisksResizeRequest(
+                transcoded_request['body']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        )
+        uri = transcoded_request['uri']
+        method = transcoded_request['method']
+
+        # Jsonify the query params
+        query_params = json.loads(compute.ResizeDiskRequest.to_json(
+            compute.ResizeDiskRequest(transcoded_request['query_params']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        ))
+
+        # Ensure required fields have values in query_params.
+        # If a required field has a default value, it can get lost
+        # by the to_json call above.
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyDiskRequest): + The request object. A request message for + Disks.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy', + 'body': 'zone_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetIamPolicyDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ZoneSetPolicyRequest.to_json( + compute.ZoneSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyDiskRequest.to_json( + compute.SetIamPolicyDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsDiskRequest): + The request object. A request message for + Disks.SetLabels. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/setLabels', + 'body': 'zone_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetLabelsDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ZoneSetLabelsRequest.to_json( + compute.ZoneSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsDiskRequest.to_json( + compute.SetLabelsDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsDiskRequest): + The request object. A request message for + Disks.TestIamPermissions. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.TestIamPermissionsDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsDiskRequest.to_json( + compute.TestIamPermissionsDiskRequest(transcoded_request['query_params']), + 
including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_resource_policies(self) -> Callable[ + [compute.AddResourcePoliciesDiskRequest], + compute.Operation]: + return self._add_resource_policies + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListDisksRequest], + compute.DiskAggregatedList]: + return self._aggregated_list + @ property + def create_snapshot(self) -> Callable[ + [compute.CreateSnapshotDiskRequest], + compute.Operation]: + return self._create_snapshot + @ property + def delete(self) -> Callable[ + [compute.DeleteDiskRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetDiskRequest], + compute.Disk]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyDiskRequest], + compute.Policy]: + 
return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertDiskRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListDisksRequest], + compute.DiskList]: + return self._list + @ property + def remove_resource_policies(self) -> Callable[ + [compute.RemoveResourcePoliciesDiskRequest], + compute.Operation]: + return self._remove_resource_policies + @ property + def resize(self) -> Callable[ + [compute.ResizeDiskRequest], + compute.Operation]: + return self._resize + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyDiskRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsDiskRequest], + compute.Operation]: + return self._set_labels + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsDiskRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'DisksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/__init__.py new file mode 100644 index 000000000..d9240223e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Package-level re-export: this generated package exposes only the
+# synchronous REST client; there is no async client for compute v1.
+from .client import ExternalVpnGatewaysClient
+
+# Declare the explicit public API of this subpackage.
+__all__ = (
+    'ExternalVpnGatewaysClient',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/client.py
new file mode 100644
index 000000000..e332bba52
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/client.py
@@ -0,0 +1,883 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.external_vpn_gateways import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import ExternalVpnGatewaysTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ExternalVpnGatewaysRestTransport + + +class ExternalVpnGatewaysClientMeta(type): + """Metaclass for the ExternalVpnGateways client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ExternalVpnGatewaysTransport]] + _transport_registry["rest"] = ExternalVpnGatewaysRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ExternalVpnGatewaysTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ExternalVpnGatewaysClient(metaclass=ExternalVpnGatewaysClientMeta): + """The ExternalVpnGateways API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExternalVpnGatewaysClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ExternalVpnGatewaysClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ExternalVpnGatewaysTransport: + """Returns the transport used by the client instance. + + Returns: + ExternalVpnGatewaysTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ExternalVpnGatewaysTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the external vpn gateways client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ExternalVpnGatewaysTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ExternalVpnGatewaysTransport): + # transport is a ExternalVpnGatewaysTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteExternalVpnGatewayRequest, dict] = None, + *, + project: str = None, + external_vpn_gateway: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified externalVpnGateway. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteExternalVpnGatewayRequest, dict]): + The request object. A request message for + ExternalVpnGateways.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + external_vpn_gateway (str): + Name of the externalVpnGateways to + delete. 
+ + This corresponds to the ``external_vpn_gateway`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, external_vpn_gateway]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteExternalVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteExternalVpnGatewayRequest): + request = compute.DeleteExternalVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if external_vpn_gateway is not None: + request.external_vpn_gateway = external_vpn_gateway + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetExternalVpnGatewayRequest, dict] = None, + *, + project: str = None, + external_vpn_gateway: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.ExternalVpnGateway: + r"""Returns the specified externalVpnGateway. Get a list + of available externalVpnGateways by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetExternalVpnGatewayRequest, dict]): + The request object. A request message for + ExternalVpnGateways.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + external_vpn_gateway (str): + Name of the externalVpnGateway to + return. + + This corresponds to the ``external_vpn_gateway`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.ExternalVpnGateway: + Represents an external VPN gateway. + External VPN gateway is the on-premises + VPN gateway(s) or another cloud + provider's VPN gateway that connects to + your Google Cloud VPN gateway. To create + a highly available VPN from Google Cloud + Platform to your VPN gateway or another + cloud provider's VPN gateway, you must + create a external VPN gateway resource + with information about the other + gateway. For more information about + using external VPN gateways, see + Creating an HA VPN gateway and tunnel + pair to a peer VPN. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, external_vpn_gateway]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetExternalVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetExternalVpnGatewayRequest): + request = compute.GetExternalVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if external_vpn_gateway is not None: + request.external_vpn_gateway = external_vpn_gateway + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertExternalVpnGatewayRequest, dict] = None, + *, + project: str = None, + external_vpn_gateway_resource: compute.ExternalVpnGateway = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a ExternalVpnGateway in the specified project + using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertExternalVpnGatewayRequest, dict]): + The request object. A request message for + ExternalVpnGateways.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + external_vpn_gateway_resource (google.cloud.compute_v1.types.ExternalVpnGateway): + The body resource for this request + This corresponds to the ``external_vpn_gateway_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, external_vpn_gateway_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertExternalVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertExternalVpnGatewayRequest): + request = compute.InsertExternalVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if external_vpn_gateway_resource is not None: + request.external_vpn_gateway_resource = external_vpn_gateway_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListExternalVpnGatewaysRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of ExternalVpnGateway available to + the specified project. 
+ + Args: + request (Union[google.cloud.compute_v1.types.ListExternalVpnGatewaysRequest, dict]): + The request object. A request message for + ExternalVpnGateways.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.external_vpn_gateways.pagers.ListPager: + Response to the list request, and + contains a list of externalVpnGateways. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListExternalVpnGatewaysRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListExternalVpnGatewaysRequest): + request = compute.ListExternalVpnGatewaysRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsExternalVpnGatewayRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_labels_request_resource: compute.GlobalSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on an ExternalVpnGateway. To learn + more about labels, read the Labeling Resources + documentation. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsExternalVpnGatewayRequest, dict]): + The request object. A request message for + ExternalVpnGateways.SetLabels. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + This corresponds to the ``global_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, global_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsExternalVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetLabelsExternalVpnGatewayRequest): + request = compute.SetLabelsExternalVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_labels_request_resource is not None: + request.global_set_labels_request_resource = global_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsExternalVpnGatewayRequest, dict] = None, + *, + project: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsExternalVpnGatewayRequest, dict]): + The request object. A request message for + ExternalVpnGateways.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsExternalVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsExternalVpnGatewayRequest): + request = compute.TestIamPermissionsExternalVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. 
class ListPager:
    """A pager for iterating through ``list`` requests.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.ExternalVpnGatewayList` response
    and exposes ``__iter__`` over its ``items`` field. When the service
    reports more pages, additional ``List`` requests are issued lazily and
    iteration continues across the returned pages.

    All the usual :class:`google.cloud.compute_v1.types.ExternalVpnGatewayList`
    attributes are proxied through the pager; if multiple requests are made,
    only the most recent response is retained for attribute lookup.
    """

    def __init__(self,
            method: Callable[..., compute.ExternalVpnGatewayList],
            request: compute.ListExternalVpnGatewaysRequest,
            response: compute.ExternalVpnGatewayList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListExternalVpnGatewaysRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.ExternalVpnGatewayList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so that page-token mutation below never
        # modifies the caller's object.
        self._request = compute.ListExternalVpnGatewaysRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute access to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.ExternalVpnGatewayList]:
        page = self._response
        yield page
        # Keep fetching while the service reports a continuation token.
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[compute.ExternalVpnGateway]:
        return (item for page in self.pages for item in page.items)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
+_transport_registry = OrderedDict() # type: Dict[str, Type[ExternalVpnGatewaysTransport]] +_transport_registry['rest'] = ExternalVpnGatewaysRestTransport + +__all__ = ( + 'ExternalVpnGatewaysTransport', + 'ExternalVpnGatewaysRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/transports/base.py new file mode 100644 index 000000000..7d8a01a98 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/external_vpn_gateways/transports/base.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class ExternalVpnGatewaysTransport(abc.ABC):
    """Abstract transport class for ExternalVpnGateways.

    Concrete transports (e.g. REST) subclass this and implement the RPC
    properties below. This base class is responsible only for credential
    resolution and for precomputing the retry/timeout method wrappers
    shared by every transport implementation.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # Credentials may come from at most one of the two arguments;
        # otherwise fall back to Application Default Credentials.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )

        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try
        # to use self-signed JWT (guarded by hasattr for older google-auth).
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. Every RPC of this service gets
        # identical (empty) retry/timeout defaults, so build the table in
        # one comprehension instead of six copy-pasted entries.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (
                self.delete,
                self.get,
                self.insert,
                self.list,
                self.set_labels,
                self.test_iam_permissions,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteExternalVpnGatewayRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetExternalVpnGatewayRequest],
            Union[
                compute.ExternalVpnGateway,
                Awaitable[compute.ExternalVpnGateway]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertExternalVpnGatewayRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListExternalVpnGatewaysRequest],
            Union[
                compute.ExternalVpnGatewayList,
                Awaitable[compute.ExternalVpnGatewayList]
            ]]:
        raise NotImplementedError()

    @property
    def set_labels(self) -> Callable[
            [compute.SetLabelsExternalVpnGatewayRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(self) -> Callable[
            [compute.TestIamPermissionsExternalVpnGatewayRequest],
            Union[
                compute.TestPermissionsResponse,
                Awaitable[compute.TestPermissionsResponse]
            ]]:
        raise NotImplementedError()
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import ExternalVpnGatewaysTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ExternalVpnGatewaysRestTransport(ExternalVpnGatewaysTransport): + """REST backend transport for ExternalVpnGateways. + + The ExternalVpnGateways API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteExternalVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteExternalVpnGatewayRequest): + The request object. A request message for + ExternalVpnGateways.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/externalVpnGateways/{external_vpn_gateway}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "external_vpn_gateway", + "externalVpnGateway" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteExternalVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteExternalVpnGatewayRequest.to_json( + compute.DeleteExternalVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetExternalVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ExternalVpnGateway: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetExternalVpnGatewayRequest): + The request object. A request message for + ExternalVpnGateways.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ExternalVpnGateway: + Represents an external VPN gateway. + External VPN gateway is the on-premises + VPN gateway(s) or another cloud + provider's VPN gateway that connects to + your Google Cloud VPN gateway. 
To create + a highly available VPN from Google Cloud + Platform to your VPN gateway or another + cloud provider's VPN gateway, you must + create a external VPN gateway resource + with information about the other + gateway. For more information about + using external VPN gateways, see + Creating an HA VPN gateway and tunnel + pair to a peer VPN. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/externalVpnGateways/{external_vpn_gateway}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "external_vpn_gateway", + "externalVpnGateway" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetExternalVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetExternalVpnGatewayRequest.to_json( + compute.GetExternalVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ExternalVpnGateway.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertExternalVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertExternalVpnGatewayRequest): + The request object. A request message for + ExternalVpnGateways.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/externalVpnGateways', + 'body': 'external_vpn_gateway_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertExternalVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ExternalVpnGateway.to_json( + compute.ExternalVpnGateway( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertExternalVpnGatewayRequest.to_json( + compute.InsertExternalVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListExternalVpnGatewaysRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ExternalVpnGatewayList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListExternalVpnGatewaysRequest): + The request object. A request message for + ExternalVpnGateways.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ExternalVpnGatewayList: + Response to the list request, and + contains a list of externalVpnGateways. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/externalVpnGateways', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListExternalVpnGatewaysRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListExternalVpnGatewaysRequest.to_json( + compute.ListExternalVpnGatewaysRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ExternalVpnGatewayList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsExternalVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsExternalVpnGatewayRequest): + The request object. A request message for + ExternalVpnGateways.SetLabels. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/externalVpnGateways/{resource}/setLabels', + 'body': 'global_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetLabelsExternalVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetLabelsRequest.to_json( + compute.GlobalSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsExternalVpnGatewayRequest.to_json( + compute.SetLabelsExternalVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsExternalVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsExternalVpnGatewayRequest): + The request object. A request message for + ExternalVpnGateways.TestIamPermissions. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsExternalVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsExternalVpnGatewayRequest.to_json( + 
compute.TestIamPermissionsExternalVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteExternalVpnGatewayRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetExternalVpnGatewayRequest], + compute.ExternalVpnGateway]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertExternalVpnGatewayRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListExternalVpnGatewaysRequest], + compute.ExternalVpnGatewayList]: + return self._list + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsExternalVpnGatewayRequest], + compute.Operation]: + return self._set_labels + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsExternalVpnGatewayRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'ExternalVpnGatewaysRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/__init__.py new file mode 100644 index 000000000..db3de7139 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import FirewallPoliciesClient + +__all__ = ( + 'FirewallPoliciesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/client.py new file mode 100644 index 000000000..69a44324b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/client.py @@ -0,0 +1,1826 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.firewall_policies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import FirewallPoliciesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import FirewallPoliciesRestTransport + + +class FirewallPoliciesClientMeta(type): + """Metaclass for the FirewallPolicies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[FirewallPoliciesTransport]] + _transport_registry["rest"] = FirewallPoliciesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[FirewallPoliciesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class FirewallPoliciesClient(metaclass=FirewallPoliciesClientMeta): + """The FirewallPolicies API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FirewallPoliciesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FirewallPoliciesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FirewallPoliciesTransport: + """Returns the transport used by the client instance. + + Returns: + FirewallPoliciesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FirewallPoliciesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the firewall policies client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FirewallPoliciesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, FirewallPoliciesTransport): + # transport is a FirewallPoliciesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_association(self, + request: Union[compute.AddAssociationFirewallPolicyRequest, dict] = None, + *, + firewall_policy: str = None, + firewall_policy_association_resource: compute.FirewallPolicyAssociation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Inserts an association for the specified firewall + policy. + + Args: + request (Union[google.cloud.compute_v1.types.AddAssociationFirewallPolicyRequest, dict]): + The request object. A request message for + FirewallPolicies.AddAssociation. See the method + description for details. + firewall_policy (str): + Name of the firewall policy to + update. + + This corresponds to the ``firewall_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ firewall_policy_association_resource (google.cloud.compute_v1.types.FirewallPolicyAssociation): + The body resource for this request + This corresponds to the ``firewall_policy_association_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([firewall_policy, firewall_policy_association_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddAssociationFirewallPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.AddAssociationFirewallPolicyRequest): + request = compute.AddAssociationFirewallPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if firewall_policy is not None: + request.firewall_policy = firewall_policy + if firewall_policy_association_resource is not None: + request.firewall_policy_association_resource = firewall_policy_association_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_association] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_rule(self, + request: Union[compute.AddRuleFirewallPolicyRequest, dict] = None, + *, + firewall_policy: str = None, + firewall_policy_rule_resource: compute.FirewallPolicyRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Inserts a rule into a firewall policy. + + Args: + request (Union[google.cloud.compute_v1.types.AddRuleFirewallPolicyRequest, dict]): + The request object. A request message for + FirewallPolicies.AddRule. See the method description for + details. + firewall_policy (str): + Name of the firewall policy to + update. + + This corresponds to the ``firewall_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + firewall_policy_rule_resource (google.cloud.compute_v1.types.FirewallPolicyRule): + The body resource for this request + This corresponds to the ``firewall_policy_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([firewall_policy, firewall_policy_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddRuleFirewallPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddRuleFirewallPolicyRequest): + request = compute.AddRuleFirewallPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if firewall_policy is not None: + request.firewall_policy = firewall_policy + if firewall_policy_rule_resource is not None: + request.firewall_policy_rule_resource = firewall_policy_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_rule] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def clone_rules(self, + request: Union[compute.CloneRulesFirewallPolicyRequest, dict] = None, + *, + firewall_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Copies rules to the specified firewall policy. + + Args: + request (Union[google.cloud.compute_v1.types.CloneRulesFirewallPolicyRequest, dict]): + The request object. A request message for + FirewallPolicies.CloneRules. See the method description + for details. + firewall_policy (str): + Name of the firewall policy to + update. + + This corresponds to the ``firewall_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([firewall_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.CloneRulesFirewallPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.CloneRulesFirewallPolicyRequest): + request = compute.CloneRulesFirewallPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if firewall_policy is not None: + request.firewall_policy = firewall_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.clone_rules] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteFirewallPolicyRequest, dict] = None, + *, + firewall_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified policy. 
# NOTE(review): GAPIC-generated client code (owl-bot staging diff); lines
# reconstructed with conventional indentation. Manual edits here will be
# overwritten the next time the generator runs — TODO confirm before editing.

        Args:
            request (Union[google.cloud.compute_v1.types.DeleteFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.Delete. See the method description for
                details.
            firewall_policy (str):
                Name of the firewall policy to
                delete.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.DeleteFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.DeleteFirewallPolicyRequest):
            request = compute.DeleteFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get(self,
            request: Union[compute.GetFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.FirewallPolicy:
        r"""Returns the specified firewall policy.

        Args:
            request (Union[google.cloud.compute_v1.types.GetFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.Get. See the method description for
                details.
            firewall_policy (str):
                Name of the firewall policy to get.
                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.FirewallPolicy:
                Represents a Firewall Policy
                resource.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetFirewallPolicyRequest):
            request = compute.GetFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get_association(self,
            request: Union[compute.GetAssociationFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.FirewallPolicyAssociation:
        r"""Gets an association with the specified name.

        Args:
            request (Union[google.cloud.compute_v1.types.GetAssociationFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.GetAssociation. See the method
                description for details.
            firewall_policy (str):
                Name of the firewall policy to which
                the queried rule belongs.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
# NOTE(review): GAPIC-generated client code (owl-bot staging diff); lines
# reconstructed with conventional indentation. Manual edits here will be
# overwritten the next time the generator runs — TODO confirm before editing.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.FirewallPolicyAssociation:

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetAssociationFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetAssociationFirewallPolicyRequest):
            request = compute.GetAssociationFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_association]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get_iam_policy(self,
            request: Union[compute.GetIamPolicyFirewallPolicyRequest, dict] = None,
            *,
            resource: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Policy:
        r"""Gets the access control policy for a resource. May be
        empty if no such policy or resource exists.

        Args:
            request (Union[google.cloud.compute_v1.types.GetIamPolicyFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.GetIamPolicy. See the method
                description for details.
            resource (str):
                Name or id of the resource for this
                request.

                This corresponds to the ``resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Policy:
                An Identity and Access Management (IAM) policy, which
                specifies access controls for Google Cloud resources. A
                Policy is a collection of bindings. A binding binds one
                or more members to a single role. Members can be user
                accounts, service accounts, Google groups, and domains
                (such as G Suite). A role is a named list of
                permissions; each role can be an IAM predefined role or
                a user-created custom role. For some types of Google
                Cloud resources, a binding can also specify a condition,
                which is a logical expression that allows access to a
                resource only if the expression evaluates to true. A
                condition can add constraints based on attributes of the
                request, the resource, or both. To learn which resources
                support conditions in their IAM policies, see the [IAM
                documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
                **JSON example:** { "bindings": [ { "role":
                "roles/resourcemanager.organizationAdmin", "members": [
                "user:mike@example.com", "group:admins@example.com",
                "domain:google.com",
                "serviceAccount:my-project-id@appspot.gserviceaccount.com"
                ] }, { "role":
                "roles/resourcemanager.organizationViewer", "members": [
                "user:eve@example.com" ], "condition": { "title":
                "expirable access", "description": "Does not grant
                access after Sep 2020", "expression": "request.time <
                timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
                "BwWWja0YfJA=", "version": 3 } **YAML example:**
                bindings: - members: - user:\ mike@example.com -
                group:\ admins@example.com - domain:google.com -
                serviceAccount:\ my-project-id@appspot.gserviceaccount.com
                role: roles/resourcemanager.organizationAdmin - members:
                - user:\ eve@example.com role:
                roles/resourcemanager.organizationViewer condition:
                title: expirable access description: Does not grant
                access after Sep 2020 expression: request.time <
                timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
                version: 3 For a description of IAM and its features,
                see the [IAM
                documentation](\ https://cloud.google.com/iam/docs/).

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetIamPolicyFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetIamPolicyFirewallPolicyRequest):
            request = compute.GetIamPolicyFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if resource is not None:
            request.resource = resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get_rule(self,
            request: Union[compute.GetRuleFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.FirewallPolicyRule:
        r"""Gets a rule of the specified priority.

        Args:
            request (Union[google.cloud.compute_v1.types.GetRuleFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.GetRule. See the method description for
                details.
            firewall_policy (str):
                Name of the firewall policy to which
                the queried rule belongs.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.FirewallPolicyRule:
                Represents a rule that describes one
                or more match conditions along with the
                action to be taken when traffic matches
                this condition (allow or deny).

        """
        # Create or coerce a protobuf request object.
# NOTE(review): GAPIC-generated client code (owl-bot staging diff); lines
# reconstructed with conventional indentation. Manual edits here will be
# overwritten the next time the generator runs — TODO confirm before editing.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetRuleFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetRuleFirewallPolicyRequest):
            request = compute.GetRuleFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_rule]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def insert(self,
            request: Union[compute.InsertFirewallPolicyRequest, dict] = None,
            *,
            parent_id: str = None,
            firewall_policy_resource: compute.FirewallPolicy = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Creates a new policy in the specified project using
        the data included in the request.

        Args:
            request (Union[google.cloud.compute_v1.types.InsertFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.Insert. See the method description for
                details.
            parent_id (str):
                Parent ID for this request. The ID can be either be
                "folders/[FOLDER_ID]" if the parent is a folder or
                "organizations/[ORGANIZATION_ID]" if the parent is an
                organization.

                This corresponds to the ``parent_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            firewall_policy_resource (google.cloud.compute_v1.types.FirewallPolicy):
                The body resource for this request
                This corresponds to the ``firewall_policy_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent_id, firewall_policy_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.InsertFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.InsertFirewallPolicyRequest):
            request = compute.InsertFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent_id is not None:
            request.parent_id = parent_id
        if firewall_policy_resource is not None:
            request.firewall_policy_resource = firewall_policy_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.insert]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list(self,
            request: Union[compute.ListFirewallPoliciesRequest, dict] = None,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListPager:
        r"""Lists all the policies that have been configured for
        the specified folder or organization.

        Args:
            request (Union[google.cloud.compute_v1.types.ListFirewallPoliciesRequest, dict]):
                The request object. A request message for
                FirewallPolicies.List. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
# NOTE(review): GAPIC-generated client code (owl-bot staging diff); lines
# reconstructed with conventional indentation. Manual edits here will be
# overwritten the next time the generator runs — TODO confirm before editing.

        Returns:
            google.cloud.compute_v1.services.firewall_policies.pagers.ListPager:
                Iterating over this object will yield
                results and resolve additional pages
                automatically.

        """
        # Create or coerce a protobuf request object.
        # Minor optimization to avoid making a copy if the user passes
        # in a compute.ListFirewallPoliciesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.ListFirewallPoliciesRequest):
            request = compute.ListFirewallPoliciesRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list_associations(self,
            request: Union[compute.ListAssociationsFirewallPolicyRequest, dict] = None,
            *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.FirewallPoliciesListAssociationsResponse:
        r"""Lists associations of a specified target, i.e.,
        organization or folder.

        Args:
            request (Union[google.cloud.compute_v1.types.ListAssociationsFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.ListAssociations. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.FirewallPoliciesListAssociationsResponse:

        """
        # Create or coerce a protobuf request object.
        # Minor optimization to avoid making a copy if the user passes
        # in a compute.ListAssociationsFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.ListAssociationsFirewallPolicyRequest):
            request = compute.ListAssociationsFirewallPolicyRequest(request)

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list_associations]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def move(self,
            request: Union[compute.MoveFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            parent_id: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Moves the specified firewall policy.

        Args:
            request (Union[google.cloud.compute_v1.types.MoveFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.Move. See the method description for
                details.
            firewall_policy (str):
                Name of the firewall policy to
                update.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            parent_id (str):
                The new parent of the firewall
                policy.

                This corresponds to the ``parent_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy, parent_id])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.MoveFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.MoveFirewallPolicyRequest):
            request = compute.MoveFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy
        if parent_id is not None:
            request.parent_id = parent_id

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
# NOTE(review): GAPIC-generated client code (owl-bot staging diff); lines
# reconstructed with conventional indentation. Manual edits here will be
# overwritten the next time the generator runs — TODO confirm before editing.
        rpc = self._transport._wrapped_methods[self._transport.move]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def patch(self,
            request: Union[compute.PatchFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            firewall_policy_resource: compute.FirewallPolicy = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Patches the specified policy with the data included
        in the request.

        Args:
            request (Union[google.cloud.compute_v1.types.PatchFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.Patch. See the method description for
                details.
            firewall_policy (str):
                Name of the firewall policy to
                update.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            firewall_policy_resource (google.cloud.compute_v1.types.FirewallPolicy):
                The body resource for this request
                This corresponds to the ``firewall_policy_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy, firewall_policy_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.PatchFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.PatchFirewallPolicyRequest):
            request = compute.PatchFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy
        if firewall_policy_resource is not None:
            request.firewall_policy_resource = firewall_policy_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.patch]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def patch_rule(self,
            request: Union[compute.PatchRuleFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            firewall_policy_rule_resource: compute.FirewallPolicyRule = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Patches a rule of the specified priority.

        Args:
            request (Union[google.cloud.compute_v1.types.PatchRuleFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.PatchRule. See the method description
                for details.
            firewall_policy (str):
                Name of the firewall policy to
                update.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            firewall_policy_rule_resource (google.cloud.compute_v1.types.FirewallPolicyRule):
                The body resource for this request
                This corresponds to the ``firewall_policy_rule_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource.
# NOTE(review): GAPIC-generated client code (owl-bot staging diff); lines
# reconstructed with conventional indentation. Manual edits here will be
# overwritten the next time the generator runs — TODO confirm before editing.
                - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy, firewall_policy_rule_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.PatchRuleFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.PatchRuleFirewallPolicyRequest):
            request = compute.PatchRuleFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy
        if firewall_policy_rule_resource is not None:
            request.firewall_policy_rule_resource = firewall_policy_rule_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.patch_rule]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def remove_association(self,
            request: Union[compute.RemoveAssociationFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Removes an association for the specified firewall
        policy.

        Args:
            request (Union[google.cloud.compute_v1.types.RemoveAssociationFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.RemoveAssociation. See the method
                description for details.
            firewall_policy (str):
                Name of the firewall policy to
                update.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.RemoveAssociationFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.RemoveAssociationFirewallPolicyRequest):
            request = compute.RemoveAssociationFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.remove_association]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def remove_rule(self,
            request: Union[compute.RemoveRuleFirewallPolicyRequest, dict] = None,
            *,
            firewall_policy: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Deletes a rule of the specified priority.

        Args:
            request (Union[google.cloud.compute_v1.types.RemoveRuleFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.RemoveRule. See the method description
                for details.
            firewall_policy (str):
                Name of the firewall policy to
                update.

                This corresponds to the ``firewall_policy`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource.
# NOTE(review): GAPIC-generated client code (owl-bot staging diff); lines
# reconstructed with conventional indentation. Manual edits here will be
# overwritten the next time the generator runs — TODO confirm before editing.
                Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([firewall_policy])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.RemoveRuleFirewallPolicyRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.RemoveRuleFirewallPolicyRequest):
            request = compute.RemoveRuleFirewallPolicyRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if firewall_policy is not None:
            request.firewall_policy = firewall_policy

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.remove_rule]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def set_iam_policy(self,
            request: Union[compute.SetIamPolicyFirewallPolicyRequest, dict] = None,
            *,
            resource: str = None,
            global_organization_set_policy_request_resource: compute.GlobalOrganizationSetPolicyRequest = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Policy:
        r"""Sets the access control policy on the specified
        resource. Replaces any existing policy.

        Args:
            request (Union[google.cloud.compute_v1.types.SetIamPolicyFirewallPolicyRequest, dict]):
                The request object. A request message for
                FirewallPolicies.SetIamPolicy. See the method
                description for details.
            resource (str):
                Name or id of the resource for this
                request.

                This corresponds to the ``resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            global_organization_set_policy_request_resource (google.cloud.compute_v1.types.GlobalOrganizationSetPolicyRequest):
                The body resource for this request
                This corresponds to the ``global_organization_set_policy_request_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Policy:
                An Identity and Access Management (IAM) policy, which
                specifies access controls for Google Cloud resources. A
                Policy is a collection of bindings. A binding binds one
                or more members to a single role. Members can be user
                accounts, service accounts, Google groups, and domains
                (such as G Suite). A role is a named list of
                permissions; each role can be an IAM predefined role or
                a user-created custom role. For some types of Google
                Cloud resources, a binding can also specify a condition,
                which is a logical expression that allows access to a
                resource only if the expression evaluates to true. A
                condition can add constraints based on attributes of the
                request, the resource, or both. To learn which resources
                support conditions in their IAM policies, see the [IAM
                documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).
                **JSON example:** { "bindings": [ { "role":
                "roles/resourcemanager.organizationAdmin", "members": [
                "user:mike@example.com", "group:admins@example.com",
                "domain:google.com",
                "serviceAccount:my-project-id@appspot.gserviceaccount.com"
                ] }, { "role":
                "roles/resourcemanager.organizationViewer", "members": [
                "user:eve@example.com" ], "condition": { "title":
                "expirable access", "description": "Does not grant
                access after Sep 2020", "expression": "request.time <
                timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
                "BwWWja0YfJA=", "version": 3 } **YAML example:**
                bindings: - members: - user:\ mike@example.com -
                group:\ admins@example.com - domain:google.com -
                serviceAccount:\ my-project-id@appspot.gserviceaccount.com
                role: roles/resourcemanager.organizationAdmin - members:
                - user:\ eve@example.com role:
                roles/resourcemanager.organizationViewer condition:
                title: expirable access description: Does not grant
                access after Sep 2020 expression: request.time <
                timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
                version: 3 For a description of IAM and its features,
                see the [IAM
                documentation](\ https://cloud.google.com/iam/docs/).

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([resource, global_organization_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyFirewallPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetIamPolicyFirewallPolicyRequest): + request = compute.SetIamPolicyFirewallPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if resource is not None: + request.resource = resource + if global_organization_set_policy_request_resource is not None: + request.global_organization_set_policy_request_resource = global_organization_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsFirewallPolicyRequest, dict] = None, + *, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsFirewallPolicyRequest, dict]): + The request object. A request message for + FirewallPolicies.TestIamPermissions. See the method + description for details. 
+ resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsFirewallPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsFirewallPolicyRequest): + request = compute.TestIamPermissionsFirewallPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FirewallPoliciesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/pagers.py new file mode 100644 index 000000000..6ead1120a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.FirewallPolicyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.FirewallPolicyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.FirewallPolicyList], + request: compute.ListFirewallPoliciesRequest, + response: compute.FirewallPolicyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListFirewallPoliciesRequest): + The initial request object. + response (google.cloud.compute_v1.types.FirewallPolicyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListFirewallPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.FirewallPolicyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.FirewallPolicy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/__init__.py new file mode 100644 index 000000000..728be52ea --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import FirewallPoliciesTransport +from .rest import FirewallPoliciesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[FirewallPoliciesTransport]] +_transport_registry['rest'] = FirewallPoliciesRestTransport + +__all__ = ( + 'FirewallPoliciesTransport', + 'FirewallPoliciesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/base.py new file mode 100644 index 000000000..39422a48b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/base.py @@ -0,0 +1,385 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class FirewallPoliciesTransport(abc.ABC): + """Abstract transport class for FirewallPolicies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.add_association: gapic_v1.method.wrap_method( + self.add_association, + default_timeout=None, + client_info=client_info, + ), + self.add_rule: gapic_v1.method.wrap_method( + self.add_rule, + default_timeout=None, + client_info=client_info, + ), + self.clone_rules: gapic_v1.method.wrap_method( + self.clone_rules, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_association: gapic_v1.method.wrap_method( + self.get_association, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.get_rule: gapic_v1.method.wrap_method( + self.get_rule, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_associations: gapic_v1.method.wrap_method( + self.list_associations, + default_timeout=None, + client_info=client_info, + ), + self.move: gapic_v1.method.wrap_method( + self.move, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.patch_rule: gapic_v1.method.wrap_method( + self.patch_rule, + default_timeout=None, + client_info=client_info, + ), + self.remove_association: gapic_v1.method.wrap_method( + self.remove_association, + default_timeout=None, + client_info=client_info, + ), + self.remove_rule: gapic_v1.method.wrap_method( + self.remove_rule, + default_timeout=None, + client_info=client_info, + ), + 
self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def add_association(self) -> Callable[ + [compute.AddAssociationFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def add_rule(self) -> Callable[ + [compute.AddRuleFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def clone_rules(self) -> Callable[ + [compute.CloneRulesFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetFirewallPolicyRequest], + Union[ + compute.FirewallPolicy, + Awaitable[compute.FirewallPolicy] + ]]: + raise NotImplementedError() + + @property + def get_association(self) -> Callable[ + [compute.GetAssociationFirewallPolicyRequest], + Union[ + compute.FirewallPolicyAssociation, + Awaitable[compute.FirewallPolicyAssociation] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyFirewallPolicyRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def get_rule(self) -> Callable[ + [compute.GetRuleFirewallPolicyRequest], + Union[ + 
compute.FirewallPolicyRule, + Awaitable[compute.FirewallPolicyRule] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListFirewallPoliciesRequest], + Union[ + compute.FirewallPolicyList, + Awaitable[compute.FirewallPolicyList] + ]]: + raise NotImplementedError() + + @property + def list_associations(self) -> Callable[ + [compute.ListAssociationsFirewallPolicyRequest], + Union[ + compute.FirewallPoliciesListAssociationsResponse, + Awaitable[compute.FirewallPoliciesListAssociationsResponse] + ]]: + raise NotImplementedError() + + @property + def move(self) -> Callable[ + [compute.MoveFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def patch_rule(self) -> Callable[ + [compute.PatchRuleFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def remove_association(self) -> Callable[ + [compute.RemoveAssociationFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def remove_rule(self) -> Callable[ + [compute.RemoveRuleFirewallPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyFirewallPolicyRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + 
[compute.TestIamPermissionsFirewallPolicyRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'FirewallPoliciesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/rest.py new file mode 100644 index 000000000..e63940f67 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewall_policies/transports/rest.py @@ -0,0 +1,2015 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import FirewallPoliciesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class FirewallPoliciesRestTransport(FirewallPoliciesTransport): + """REST backend transport for FirewallPolicies. + + The FirewallPolicies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_association(self, + request: compute.AddAssociationFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add association method over HTTP. + + Args: + request (~.compute.AddAssociationFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.AddAssociation. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/addAssociation', + 'body': 'firewall_policy_association_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.AddAssociationFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.FirewallPolicyAssociation.to_json( + compute.FirewallPolicyAssociation( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddAssociationFirewallPolicyRequest.to_json( + compute.AddAssociationFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _add_rule(self, + request: compute.AddRuleFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add rule method over HTTP. + + Args: + request (~.compute.AddRuleFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.AddRule. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/addRule', + 'body': 'firewall_policy_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.AddRuleFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.FirewallPolicyRule.to_json( + compute.FirewallPolicyRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddRuleFirewallPolicyRequest.to_json( + compute.AddRuleFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _clone_rules(self, + request: compute.CloneRulesFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the clone rules method over HTTP. + + Args: + request (~.compute.CloneRulesFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.CloneRules. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/cloneRules', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.CloneRulesFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.CloneRulesFirewallPolicyRequest.to_json( + compute.CloneRulesFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.DeleteFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteFirewallPolicyRequest.to_json( + compute.DeleteFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.FirewallPolicy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.FirewallPolicy: + Represents a Firewall Policy + resource. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.GetFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetFirewallPolicyRequest.to_json( + compute.GetFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.FirewallPolicy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_association(self, + request: compute.GetAssociationFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.FirewallPolicyAssociation: + r"""Call the get association method over HTTP. + + Args: + request (~.compute.GetAssociationFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.GetAssociation. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.FirewallPolicyAssociation: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/getAssociation', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.GetAssociationFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetAssociationFirewallPolicyRequest.to_json( + compute.GetAssociationFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.FirewallPolicyAssociation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.GetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/firewallPolicies/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyFirewallPolicyRequest.to_json( + compute.GetIamPolicyFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_rule(self, + request: compute.GetRuleFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.FirewallPolicyRule: + r"""Call the get rule method over HTTP. + + Args: + request (~.compute.GetRuleFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.GetRule. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.FirewallPolicyRule: + Represents a rule that describes one + or more match conditions along with the + action to be taken when traffic matches + this condition (allow or deny). 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/getRule', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.GetRuleFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRuleFirewallPolicyRequest.to_json( + compute.GetRuleFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.FirewallPolicyRule.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies', + 'body': 'firewall_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "parent_id", + "parentId" + ), + ] + + request_kwargs = compute.InsertFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.FirewallPolicy.to_json( + compute.FirewallPolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertFirewallPolicyRequest.to_json( + compute.InsertFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListFirewallPoliciesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.FirewallPolicyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListFirewallPoliciesRequest): + The request object. A request message for + FirewallPolicies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.FirewallPolicyList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/firewallPolicies', + }, + ] + + request_kwargs = compute.ListFirewallPoliciesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListFirewallPoliciesRequest.to_json( + compute.ListFirewallPoliciesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # 
subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.FirewallPolicyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_associations(self, + request: compute.ListAssociationsFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.FirewallPoliciesListAssociationsResponse: + r"""Call the list associations method over HTTP. + + Args: + request (~.compute.ListAssociationsFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.ListAssociations. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.FirewallPoliciesListAssociationsResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/firewallPolicies/listAssociations', + }, + ] + + request_kwargs = compute.ListAssociationsFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListAssociationsFirewallPolicyRequest.to_json( + compute.ListAssociationsFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + 
headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.FirewallPoliciesListAssociationsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _move(self, + request: compute.MoveFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the move method over HTTP. + + Args: + request (~.compute.MoveFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.Move. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/move', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ( + "parent_id", + "parentId" + ), + ] + + request_kwargs = compute.MoveFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.MoveFirewallPolicyRequest.to_json( + compute.MoveFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}', + 'body': 'firewall_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.PatchFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.FirewallPolicy.to_json( + compute.FirewallPolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchFirewallPolicyRequest.to_json( + compute.PatchFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch_rule(self, + request: compute.PatchRuleFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch rule method over HTTP. + + Args: + request (~.compute.PatchRuleFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.PatchRule. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/patchRule', + 'body': 'firewall_policy_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.PatchRuleFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.FirewallPolicyRule.to_json( + compute.FirewallPolicyRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchRuleFirewallPolicyRequest.to_json( + compute.PatchRuleFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_association(self, + request: compute.RemoveAssociationFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove association method over HTTP. + + Args: + request (~.compute.RemoveAssociationFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.RemoveAssociation. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/removeAssociation', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.RemoveAssociationFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveAssociationFirewallPolicyRequest.to_json( + compute.RemoveAssociationFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_rule(self, + request: compute.RemoveRuleFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove rule method over HTTP. + + Args: + request (~.compute.RemoveRuleFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.RemoveRule. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{firewall_policy}/removeRule', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall_policy", + "firewallPolicy" + ), + ] + + request_kwargs = compute.RemoveRuleFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveRuleFirewallPolicyRequest.to_json( + compute.RemoveRuleFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.SetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{resource}/setIamPolicy', + 'body': 'global_organization_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalOrganizationSetPolicyRequest.to_json( + compute.GlobalOrganizationSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyFirewallPolicyRequest.to_json( + compute.SetIamPolicyFirewallPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsFirewallPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsFirewallPolicyRequest): + The request object. A request message for + FirewallPolicies.TestIamPermissions. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/locations/global/firewallPolicies/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsFirewallPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsFirewallPolicyRequest.to_json( + compute.TestIamPermissionsFirewallPolicyRequest(transcoded_request['query_params']), + 
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        # The metadata pairs are forwarded as extra HTTP headers.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.TestPermissionsResponse.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    # ------------------------------------------------------------------
    # Public RPC handles: each property exposes the matching private
    # implementation above as a callable with typed request/response.
    # ------------------------------------------------------------------
    @ property
    def add_association(self) -> Callable[
            [compute.AddAssociationFirewallPolicyRequest],
            compute.Operation]:
        return self._add_association
    @ property
    def add_rule(self) -> Callable[
            [compute.AddRuleFirewallPolicyRequest],
            compute.Operation]:
        return self._add_rule
    @ property
    def clone_rules(self) -> Callable[
            [compute.CloneRulesFirewallPolicyRequest],
            compute.Operation]:
        return self._clone_rules
    @ property
    def delete(self) -> Callable[
            [compute.DeleteFirewallPolicyRequest],
            compute.Operation]:
        return self._delete
    @ property
    def get(self) -> Callable[
            [compute.GetFirewallPolicyRequest],
            compute.FirewallPolicy]:
        return self._get
    @ property
    def get_association(self) -> Callable[
            [compute.GetAssociationFirewallPolicyRequest],
            compute.FirewallPolicyAssociation]:
        return self._get_association
    @ property
    def get_iam_policy(self) -> Callable[
            [compute.GetIamPolicyFirewallPolicyRequest],
            compute.Policy]:
        return self._get_iam_policy
    @ property
    def get_rule(self) -> Callable[
            [compute.GetRuleFirewallPolicyRequest],
            compute.FirewallPolicyRule]:
        return self._get_rule
    @ property
    def insert(self) -> Callable[
            [compute.InsertFirewallPolicyRequest],
            compute.Operation]:
        return self._insert
    @ property
    def list(self) -> Callable[
            [compute.ListFirewallPoliciesRequest],
            compute.FirewallPolicyList]:
        return self._list
    @ property
    def list_associations(self) -> Callable[
            [compute.ListAssociationsFirewallPolicyRequest],
            compute.FirewallPoliciesListAssociationsResponse]:
        return self._list_associations
    @ property
    def move(self) -> Callable[
            [compute.MoveFirewallPolicyRequest],
            compute.Operation]:
        return self._move
    @ property
    def patch(self) -> Callable[
            [compute.PatchFirewallPolicyRequest],
            compute.Operation]:
        return self._patch
    @ property
    def patch_rule(self) -> Callable[
            [compute.PatchRuleFirewallPolicyRequest],
            compute.Operation]:
        return self._patch_rule
    @ property
    def remove_association(self) -> Callable[
            [compute.RemoveAssociationFirewallPolicyRequest],
            compute.Operation]:
        return self._remove_association
    @ property
    def remove_rule(self) -> Callable[
            [compute.RemoveRuleFirewallPolicyRequest],
            compute.Operation]:
        return self._remove_rule
    @ property
    def set_iam_policy(self) -> Callable[
            [compute.SetIamPolicyFirewallPolicyRequest],
            compute.Policy]:
        return self._set_iam_policy
    @ property
    def test_iam_permissions(self) -> Callable[
            [compute.TestIamPermissionsFirewallPolicyRequest],
            compute.TestPermissionsResponse]:
        return self._test_iam_permissions
    def close(self):
        """Release the underlying HTTP session's resources."""
        self._session.close()


__all__=(
    'FirewallPoliciesRestTransport',
)
diff --git
a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/__init__.py new file mode 100644 index 000000000..1fa77bb66 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import FirewallsClient + +__all__ = ( + 'FirewallsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/client.py new file mode 100644 index 000000000..b1c916579 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/client.py @@ -0,0 +1,879 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.firewalls import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import FirewallsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import FirewallsRestTransport + + +class FirewallsClientMeta(type): + """Metaclass for the Firewalls client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[FirewallsTransport]] + _transport_registry["rest"] = FirewallsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[FirewallsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class FirewallsClient(metaclass=FirewallsClientMeta): + """The Firewalls API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + FirewallsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + FirewallsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> FirewallsTransport: + """Returns the transport used by the client instance. + + Returns: + FirewallsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return 
"projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, FirewallsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the firewalls client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, FirewallsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
def __init__(self, *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, FirewallsTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        ) -> None:
    """Instantiate the firewalls client.

    Args:
        credentials: Credentials identifying the application to the
            service; ascertained from the environment when None.
        transport: The transport to use; chosen automatically when None.
        client_options: Custom client options (ignored for most settings
            when a ``transport`` instance is provided). ``api_endpoint``
            overrides the default endpoint; otherwise the
            GOOGLE_API_USE_MTLS_ENDPOINT env var ("always", "never",
            "auto" — the default) selects between the regular and mTLS
            endpoints. GOOGLE_API_USE_CLIENT_CERTIFICATE ("true"/"false")
            controls whether a client certificate is used for mutual TLS.
        client_info: Client info used to build the user-agent string;
            default info is used when None.

    Raises:
        ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is set to
            something other than "true"/"false", or if credentials/scopes
            are combined with a pre-built transport instance.
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS
            transport creation failed for any reason.
    """
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()

    # Read the client-certificate toggle once (was read twice before).
    use_cert_env = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
    if use_cert_env not in ("true", "false"):
        raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
    use_client_cert = use_cert_env == "true"

    client_cert_source_func = None
    is_mtls = False
    if use_client_cert:
        if client_options.client_cert_source:
            is_mtls = True
            client_cert_source_func = client_options.client_cert_source
        else:
            is_mtls = mtls.has_default_client_cert_source()
            if is_mtls:
                client_cert_source_func = mtls.default_client_cert_source()
            else:
                client_cert_source_func = None

    # Figure out which API endpoint to use.
    if client_options.api_endpoint is not None:
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                "values: never, auto, always"
            )

    # Save or instantiate the transport. A caller-supplied transport
    # instance must already carry its own credentials and scopes.
    if isinstance(transport, FirewallsTransport):
        if credentials or client_options.credentials_file:
            raise ValueError("When providing a transport instance, "
                             "provide its credentials directly.")
        if client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = transport
    else:
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials,
            credentials_file=client_options.credentials_file,
            host=api_endpoint,
            scopes=client_options.scopes,
            client_cert_source_for_mtls=client_cert_source_func,
            quota_project_id=client_options.quota_project_id,
            client_info=client_info,
            always_use_jwt_access=True,
        )

def delete(self,
        request: Optional[Union[compute.DeleteFirewallRequest, dict]] = None,
        *,
        project: Optional[str] = None,
        firewall: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
    r"""Deletes the specified firewall.

    Args:
        request: A ``compute.DeleteFirewallRequest`` (or equivalent dict).
            Must not be combined with the flattened field arguments.
        project: Project ID for this request.
        firewall: Name of the firewall rule to delete.
        retry: Designation of what errors, if any, should be retried.
        timeout: The timeout for this request.
        metadata: Strings sent along with the request as metadata.

    Returns:
        compute.Operation: The operation resource tracking the request.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # An explicit request and flattened fields are mutually exclusive.
    if request is not None and any([project, firewall]):
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Coerce dicts into the proto type; skip the copy when already typed.
    if not isinstance(request, compute.DeleteFirewallRequest):
        request = compute.DeleteFirewallRequest(request)
    if project is not None:
        request.project = project
    if firewall is not None:
        request.firewall = firewall

    # The wrapped method adds retry/timeout and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.delete]
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def get(self,
        request: Optional[Union[compute.GetFirewallRequest, dict]] = None,
        *,
        project: Optional[str] = None,
        firewall: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Firewall:
    r"""Returns the specified firewall.

    Args:
        request: A ``compute.GetFirewallRequest`` (or equivalent dict).
            Must not be combined with the flattened field arguments.
        project: Project ID for this request.
        firewall: Name of the firewall rule to return.
        retry: Designation of what errors, if any, should be retried.
        timeout: The timeout for this request.
        metadata: Strings sent along with the request as metadata.

    Returns:
        compute.Firewall: The requested firewall rule resource.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # An explicit request and flattened fields are mutually exclusive.
    if request is not None and any([project, firewall]):
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Coerce dicts into the proto type; skip the copy when already typed.
    if not isinstance(request, compute.GetFirewallRequest):
        request = compute.GetFirewallRequest(request)
    if project is not None:
        request.project = project
    if firewall is not None:
        request.firewall = firewall

    # The wrapped method adds retry/timeout and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.get]
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def insert(self,
        request: Optional[Union[compute.InsertFirewallRequest, dict]] = None,
        *,
        project: Optional[str] = None,
        firewall_resource: Optional[compute.Firewall] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
    r"""Creates a firewall rule in the specified project using the data
    included in the request.

    Args:
        request: A ``compute.InsertFirewallRequest`` (or equivalent dict).
            Must not be combined with the flattened field arguments.
        project: Project ID for this request.
        firewall_resource: The body resource for this request.
        retry: Designation of what errors, if any, should be retried.
        timeout: The timeout for this request.
        metadata: Strings sent along with the request as metadata.

    Returns:
        compute.Operation: The operation resource tracking the request.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # An explicit request and flattened fields are mutually exclusive.
    if request is not None and any([project, firewall_resource]):
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Coerce dicts into the proto type; skip the copy when already typed.
    if not isinstance(request, compute.InsertFirewallRequest):
        request = compute.InsertFirewallRequest(request)
    if project is not None:
        request.project = project
    if firewall_resource is not None:
        request.firewall_resource = firewall_resource

    # The wrapped method adds retry/timeout and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.insert]
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def list(self,
        request: Optional[Union[compute.ListFirewallsRequest, dict]] = None,
        *,
        project: Optional[str] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> pagers.ListPager:
    r"""Retrieves the list of firewall rules available to the specified
    project.

    Args:
        request: A ``compute.ListFirewallsRequest`` (or equivalent dict).
            Must not be combined with the flattened field arguments.
        project: Project ID for this request.
        retry: Designation of what errors, if any, should be retried.
        timeout: The timeout for this request.
        metadata: Strings sent along with the request as metadata.

    Returns:
        pagers.ListPager: An iterable of firewalls; iterating resolves
        additional result pages automatically.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # An explicit request and flattened fields are mutually exclusive.
    if request is not None and any([project]):
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Coerce dicts into the proto type; skip the copy when already typed.
    if not isinstance(request, compute.ListFirewallsRequest):
        request = compute.ListFirewallsRequest(request)
    if project is not None:
        request.project = project

    # The wrapped method adds retry/timeout and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.list]
    response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)

    # This method is paged; wrap the response in a pager, which provides
    # an `__iter__` convenience method.
    return pagers.ListPager(
        method=rpc,
        request=request,
        response=response,
        metadata=metadata,
    )

def patch(self,
        request: Optional[Union[compute.PatchFirewallRequest, dict]] = None,
        *,
        project: Optional[str] = None,
        firewall: Optional[str] = None,
        firewall_resource: Optional[compute.Firewall] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
    r"""Updates the specified firewall rule with the data included in the
    request. This method supports PATCH semantics and uses the JSON merge
    patch format and processing rules.

    Args:
        request: A ``compute.PatchFirewallRequest`` (or equivalent dict).
            Must not be combined with the flattened field arguments.
        project: Project ID for this request.
        firewall: Name of the firewall rule to patch.
        firewall_resource: The body resource for this request.
        retry: Designation of what errors, if any, should be retried.
        timeout: The timeout for this request.
        metadata: Strings sent along with the request as metadata.

    Returns:
        compute.Operation: The operation resource tracking the request.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # An explicit request and flattened fields are mutually exclusive.
    if request is not None and any([project, firewall, firewall_resource]):
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Coerce dicts into the proto type; skip the copy when already typed.
    if not isinstance(request, compute.PatchFirewallRequest):
        request = compute.PatchFirewallRequest(request)
    if project is not None:
        request.project = project
    if firewall is not None:
        request.firewall = firewall
    if firewall_resource is not None:
        request.firewall_resource = firewall_resource

    # The wrapped method adds retry/timeout and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.patch]
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
def update(self,
        request: Optional[Union[compute.UpdateFirewallRequest, dict]] = None,
        *,
        project: Optional[str] = None,
        firewall: Optional[str] = None,
        firewall_resource: Optional[compute.Firewall] = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: Optional[float] = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
    r"""Updates the specified firewall rule with the data included in the
    request. Note that all fields will be updated if using PUT, even
    fields that are not specified. To update individual fields, please
    use PATCH instead.

    Args:
        request: A ``compute.UpdateFirewallRequest`` (or equivalent dict).
            Must not be combined with the flattened field arguments.
        project: Project ID for this request.
        firewall: Name of the firewall rule to update.
        firewall_resource: The body resource for this request.
        retry: Designation of what errors, if any, should be retried.
        timeout: The timeout for this request.
        metadata: Strings sent along with the request as metadata.

    Returns:
        compute.Operation: The operation resource tracking the request.

    Raises:
        ValueError: If ``request`` is combined with flattened arguments.
    """
    # An explicit request and flattened fields are mutually exclusive.
    if request is not None and any([project, firewall, firewall_resource]):
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Coerce dicts into the proto type; skip the copy when already typed.
    if not isinstance(request, compute.UpdateFirewallRequest):
        request = compute.UpdateFirewallRequest(request)
    if project is not None:
        request.project = project
    if firewall is not None:
        request.firewall = firewall
    if firewall_resource is not None:
        request.firewall_resource = firewall_resource

    # The wrapped method adds retry/timeout and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.update]
    return rpc(request, retry=retry, timeout=timeout, metadata=metadata)
+ if project is not None: + request.project = project + if firewall is not None: + request.firewall = firewall + if firewall_resource is not None: + request.firewall_resource = firewall_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "FirewallsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/pagers.py new file mode 100644 index 000000000..a533f33c7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
class ListPager:
    """Pager over ``Firewalls.List`` results.

    Thinly wraps an initial :class:`google.cloud.compute_v1.types.FirewallList`
    response; iterating yields the ``items`` field and transparently issues
    additional ``List`` requests while ``next_page_token`` is set. Attribute
    access is proxied to the most recent response, so all the usual
    ``FirewallList`` attributes are available on the pager.
    """

    def __init__(self,
            method: Callable[..., compute.FirewallList],
            request: compute.ListFirewallsRequest,
            response: compute.FirewallList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method: The API method that was originally called and which
                instantiated this pager.
            request: The initial request object.
            response: The initial response object.
            metadata: Strings sent along with each page request as metadata.
        """
        self._method = method
        self._request = compute.ListFirewallsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Proxy unknown attributes to the most recent response page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.FirewallList]:
        """Lazily yield each page of results, fetching as needed."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[compute.Firewall]:
        for page in self.pages:
            yield from page.items

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
+_transport_registry = OrderedDict() # type: Dict[str, Type[FirewallsTransport]] +_transport_registry['rest'] = FirewallsRestTransport + +__all__ = ( + 'FirewallsTransport', + 'FirewallsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/transports/base.py new file mode 100644 index 000000000..bec65db61 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/firewalls/transports/base.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Attach the installed package version to the user-agent when available;
# fall back to default client info when running uninstalled source.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class FirewallsTransport(abc.ABC):
    """Abstract transport class for Firewalls.

    Concrete subclasses implement the RPC properties below; this base
    class handles credential resolution and method wrapping.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host: The hostname to connect to; port 443 (HTTPS) is assumed
                when no port is given.
            credentials: Credentials identifying the application to the
                service; ascertained from the environment when None.
            credentials_file: A file loadable with
                :func:`google.auth.load_credentials_from_file`. Mutually
                exclusive with ``credentials``.
            scopes: A list of scopes.
            quota_project_id: Optional project for billing and quota.
            client_info: Client info used for the user-agent string.
            always_use_jwt_access: Whether self-signed JWTs should be used
                for service-account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Save the hostname. Default to port 443 (HTTPS) when unspecified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        self._scopes = scopes

        # Resolve credentials: explicit object, file, or the environment.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # Prefer self-signed JWTs for service-account credentials when the
        # installed google-auth version supports it.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute retry/timeout-wrapped versions of every RPC."""
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (
                self.delete,
                self.get,
                self.insert,
                self.list,
                self.patch,
                self.update,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteFirewallRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetFirewallRequest],
            Union[compute.Firewall, Awaitable[compute.Firewall]]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertFirewallRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListFirewallsRequest],
            Union[compute.FirewallList, Awaitable[compute.FirewallList]]]:
        raise NotImplementedError()

    @property
    def patch(self) -> Callable[
            [compute.PatchFirewallRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def update(self) -> Callable[
            [compute.UpdateFirewallRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()


__all__ = (
    'FirewallsTransport',
)
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import FirewallsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class FirewallsRestTransport(FirewallsTransport): + """REST backend transport for Firewalls. + + The Firewalls API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteFirewallRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteFirewallRequest): + The request object. A request message for + Firewalls.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/firewalls/{firewall}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall", + "firewall" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteFirewallRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteFirewallRequest.to_json( + compute.DeleteFirewallRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetFirewallRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Firewall: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetFirewallRequest): + The request object. A request message for Firewalls.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Firewall: + Represents a Firewall Rule resource. + Firewall rules allow or deny ingress + traffic to, and egress traffic from your + instances. For more information, read + Firewall rules. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/firewalls/{firewall}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall", + "firewall" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetFirewallRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetFirewallRequest.to_json( + compute.GetFirewallRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Firewall.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertFirewallRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertFirewallRequest): + The request object. A request message for + Firewalls.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/firewalls', + 'body': 'firewall_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertFirewallRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Firewall.to_json( + compute.Firewall( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertFirewallRequest.to_json( + compute.InsertFirewallRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListFirewallsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.FirewallList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListFirewallsRequest): + The request object. A request message for Firewalls.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.FirewallList: + Contains a list of firewalls. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/firewalls', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListFirewallsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListFirewallsRequest.to_json( + compute.ListFirewallsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.FirewallList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchFirewallRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchFirewallRequest): + The request object. A request message for + Firewalls.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/firewalls/{firewall}', + 'body': 'firewall_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall", + "firewall" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.PatchFirewallRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Firewall.to_json( + compute.Firewall( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchFirewallRequest.to_json( + compute.PatchFirewallRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateFirewallRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateFirewallRequest): + The request object. A request message for + Firewalls.Update. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/global/firewalls/{firewall}', + 'body': 'firewall_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "firewall", + "firewall" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.UpdateFirewallRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Firewall.to_json( + compute.Firewall( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateFirewallRequest.to_json( + compute.UpdateFirewallRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteFirewallRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetFirewallRequest], + compute.Firewall]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertFirewallRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListFirewallsRequest], + compute.FirewallList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchFirewallRequest], + compute.Operation]: + return self._patch + @ property + def update(self) -> Callable[ + [compute.UpdateFirewallRequest], + compute.Operation]: + return self._update + def close(self): + self._session.close() + + +__all__=( + 'FirewallsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/__init__.py new file mode 100644 index 000000000..97692b4e0 
--- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ForwardingRulesClient + +__all__ = ( + 'ForwardingRulesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/client.py new file mode 100644 index 000000000..ab3ccc414 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/client.py @@ -0,0 +1,1139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.forwarding_rules import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import ForwardingRulesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ForwardingRulesRestTransport + + +class ForwardingRulesClientMeta(type): + """Metaclass for the ForwardingRules client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ForwardingRulesTransport]] + _transport_registry["rest"] = ForwardingRulesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ForwardingRulesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ForwardingRulesClient(metaclass=ForwardingRulesClientMeta): + """The ForwardingRules API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ForwardingRulesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ForwardingRulesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ForwardingRulesTransport: + """Returns the transport used by the client instance. + + Returns: + ForwardingRulesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified 
organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ForwardingRulesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the forwarding rules client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ForwardingRulesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ForwardingRulesTransport): + # transport is a ForwardingRulesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListForwardingRulesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of forwarding rules. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListForwardingRulesRequest, dict]): + The request object. A request message for + ForwardingRules.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.forwarding_rules.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListForwardingRulesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListForwardingRulesRequest): + request = compute.AggregatedListForwardingRulesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteForwardingRuleRequest, dict] = None, + *, + project: str = None, + region: str = None, + forwarding_rule: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified ForwardingRule resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteForwardingRuleRequest, dict]): + The request object. A request message for + ForwardingRules.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule (str): + Name of the ForwardingRule resource + to delete. + + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, forwarding_rule]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteForwardingRuleRequest): + request = compute.DeleteForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetForwardingRuleRequest, dict] = None, + *, + project: str = None, + region: str = None, + forwarding_rule: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.ForwardingRule: + r"""Returns the specified ForwardingRule resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetForwardingRuleRequest, dict]): + The request object. A request message for + ForwardingRules.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule (str): + Name of the ForwardingRule resource + to return. + + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.ForwardingRule: + Represents a Forwarding Rule resource. Forwarding rule + resources in Google Cloud can be either regional or + global in scope: \* + [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) + \* + [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) + A forwarding rule and its corresponding IP address + represent the frontend configuration of a Google Cloud + Platform load balancer. Forwarding rules can also + reference target instances and Cloud VPN Classic + gateways (targetVpnGateway). For more information, read + Forwarding rule concepts and Using protocol forwarding. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, forwarding_rule]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetForwardingRuleRequest): + request = compute.GetForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertForwardingRuleRequest, dict] = None, + *, + project: str = None, + region: str = None, + forwarding_rule_resource: compute.ForwardingRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a ForwardingRule resource in the specified + project and region using the data included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertForwardingRuleRequest, dict]): + The request object. A request message for + ForwardingRules.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + This corresponds to the ``forwarding_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, forwarding_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertForwardingRuleRequest): + request = compute.InsertForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if forwarding_rule_resource is not None: + request.forwarding_rule_resource = forwarding_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListForwardingRulesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of ForwardingRule resources + available to the specified project and region. + + Args: + request (Union[google.cloud.compute_v1.types.ListForwardingRulesRequest, dict]): + The request object. A request message for + ForwardingRules.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.forwarding_rules.pagers.ListPager: + Contains a list of ForwardingRule + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListForwardingRulesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListForwardingRulesRequest): + request = compute.ListForwardingRulesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def patch(self, + request: Union[compute.PatchForwardingRuleRequest, dict] = None, + *, + project: str = None, + region: str = None, + forwarding_rule: str = None, + forwarding_rule_resource: compute.ForwardingRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified forwarding rule with the data included in + the request. This method supports PATCH semantics and uses the + JSON merge patch format and processing rules. Currently, you can + only patch the network_tier field. + + Args: + request (Union[google.cloud.compute_v1.types.PatchForwardingRuleRequest, dict]): + The request object. A request message for + ForwardingRules.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule (str): + Name of the ForwardingRule resource + to patch. + + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + This corresponds to the ``forwarding_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, forwarding_rule, forwarding_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchForwardingRuleRequest): + request = compute.PatchForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + if forwarding_rule_resource is not None: + request.forwarding_rule_resource = forwarding_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsForwardingRuleRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_labels_request_resource: compute.RegionSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on the specified resource. To learn + more about labels, read the Labeling Resources + documentation. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsForwardingRuleRequest, dict]): + The request object. A request message for + ForwardingRules.SetLabels. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region_set_labels_request_resource (google.cloud.compute_v1.types.RegionSetLabelsRequest): + The body resource for this request + This corresponds to the ``region_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, region_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetLabelsForwardingRuleRequest): + request = compute.SetLabelsForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_labels_request_resource is not None: + request.region_set_labels_request_resource = region_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_target(self, + request: Union[compute.SetTargetForwardingRuleRequest, dict] = None, + *, + project: str = None, + region: str = None, + forwarding_rule: str = None, + target_reference_resource: compute.TargetReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes target URL for forwarding rule. The new + target should be of the same type as the old target. + + Args: + request (Union[google.cloud.compute_v1.types.SetTargetForwardingRuleRequest, dict]): + The request object. A request message for + ForwardingRules.SetTarget. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ forwarding_rule (str): + Name of the ForwardingRule resource + in which target is to be set. + + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_reference_resource (google.cloud.compute_v1.types.TargetReference): + The body resource for this request + This corresponds to the ``target_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, forwarding_rule, target_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetTargetForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetTargetForwardingRuleRequest): + request = compute.SetTargetForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + if target_reference_resource is not None: + request.target_reference_resource = target_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_target] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ForwardingRulesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/pagers.py new file mode 100644 index 000000000..8acbd5b5e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ForwardingRuleAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.ForwardingRuleAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ForwardingRuleAggregatedList], + request: compute.AggregatedListForwardingRulesRequest, + response: compute.ForwardingRuleAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListForwardingRulesRequest): + The initial request object. + response (google.cloud.compute_v1.types.ForwardingRuleAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListForwardingRulesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ForwardingRuleAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.ForwardingRulesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.ForwardingRulesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ForwardingRuleList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ForwardingRuleList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ForwardingRuleList], + request: compute.ListForwardingRulesRequest, + response: compute.ForwardingRuleList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListForwardingRulesRequest): + The initial request object. + response (google.cloud.compute_v1.types.ForwardingRuleList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListForwardingRulesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ForwardingRuleList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.ForwardingRule]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/__init__.py new file mode 100644 index 000000000..e0f6dc0f1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ForwardingRulesTransport +from .rest import ForwardingRulesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[ForwardingRulesTransport]] +_transport_registry['rest'] = ForwardingRulesRestTransport + +__all__ = ( + 'ForwardingRulesTransport', + 'ForwardingRulesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/base.py new file mode 100644 index 000000000..bb629750c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/base.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ForwardingRulesTransport(abc.ABC): + """Abstract transport class for ForwardingRules.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, + default_timeout=None, + client_info=client_info, + ), + self.set_target: gapic_v1.method.wrap_method( + self.set_target, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListForwardingRulesRequest], + Union[ + compute.ForwardingRuleAggregatedList, + Awaitable[compute.ForwardingRuleAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetForwardingRuleRequest], + Union[ + compute.ForwardingRule, + Awaitable[compute.ForwardingRule] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListForwardingRulesRequest], + Union[ + compute.ForwardingRuleList, + Awaitable[compute.ForwardingRuleList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [compute.SetLabelsForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_target(self) -> Callable[ + [compute.SetTargetForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ForwardingRulesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/rest.py new file mode 100644 index 000000000..24aa48f29 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/forwarding_rules/transports/rest.py @@ -0,0 
+1,1019 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import ForwardingRulesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ForwardingRulesRestTransport(ForwardingRulesTransport): + """REST backend transport for ForwardingRules. + + The ForwardingRules API. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListForwardingRulesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ForwardingRuleAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListForwardingRulesRequest): + The request object. A request message for + ForwardingRules.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.ForwardingRuleAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/forwardingRules', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListForwardingRulesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListForwardingRulesRequest.to_json( + compute.AggregatedListForwardingRulesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ForwardingRuleAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteForwardingRuleRequest): + The request object. A request message for + ForwardingRules.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteForwardingRuleRequest.to_json( + compute.DeleteForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ForwardingRule: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetForwardingRuleRequest): + The request object. A request message for + ForwardingRules.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ForwardingRule: + Represents a Forwarding Rule resource. Forwarding rule + resources in Google Cloud can be either regional or + global in scope: \* + `Global `__ + \* + `Regional `__ + A forwarding rule and its corresponding IP address + represent the frontend configuration of a Google Cloud + Platform load balancer. Forwarding rules can also + reference target instances and Cloud VPN Classic + gateways (targetVpnGateway). For more information, read + Forwarding rule concepts and Using protocol forwarding. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetForwardingRuleRequest.to_json( + compute.GetForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ForwardingRule.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertForwardingRuleRequest): + The request object. A request message for + ForwardingRules.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/forwardingRules', + 'body': 'forwarding_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ForwardingRule.to_json( + compute.ForwardingRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertForwardingRuleRequest.to_json( + compute.InsertForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListForwardingRulesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ForwardingRuleList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListForwardingRulesRequest): + The request object. A request message for + ForwardingRules.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ForwardingRuleList: + Contains a list of ForwardingRule + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/forwardingRules', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListForwardingRulesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListForwardingRulesRequest.to_json( + compute.ListForwardingRulesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ForwardingRuleList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchForwardingRuleRequest): + The request object. A request message for + ForwardingRules.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}', + 'body': 'forwarding_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ForwardingRule.to_json( + compute.ForwardingRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchForwardingRuleRequest.to_json( + compute.PatchForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsForwardingRuleRequest): + The request object. A request message for + ForwardingRules.SetLabels. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels', + 'body': 'region_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetLabelsForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetLabelsRequest.to_json( + compute.RegionSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsForwardingRuleRequest.to_json( + compute.SetLabelsForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_target(self, + request: compute.SetTargetForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set target method over HTTP. + + Args: + request (~.compute.SetTargetForwardingRuleRequest): + The request object. A request message for + ForwardingRules.SetTarget. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}/setTarget', + 'body': 'target_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.SetTargetForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetReference.to_json( + compute.TargetReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetTargetForwardingRuleRequest.to_json( + compute.SetTargetForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListForwardingRulesRequest], + compute.ForwardingRuleAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteForwardingRuleRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetForwardingRuleRequest], + compute.ForwardingRule]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertForwardingRuleRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListForwardingRulesRequest], + compute.ForwardingRuleList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchForwardingRuleRequest], + compute.Operation]: + return self._patch + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsForwardingRuleRequest], + compute.Operation]: + return self._set_labels + @ property + def set_target(self) -> Callable[ + 
[compute.SetTargetForwardingRuleRequest], + compute.Operation]: + return self._set_target + def close(self): + self._session.close() + + +__all__=( + 'ForwardingRulesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/__init__.py new file mode 100644 index 000000000..fd45fff49 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import GlobalAddressesClient + +__all__ = ( + 'GlobalAddressesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/client.py new file mode 100644 index 000000000..67a2db0b2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/client.py @@ -0,0 +1,691 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.global_addresses import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import GlobalAddressesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import GlobalAddressesRestTransport + + +class GlobalAddressesClientMeta(type): + """Metaclass for the GlobalAddresses client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[GlobalAddressesTransport]] + _transport_registry["rest"] = GlobalAddressesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[GlobalAddressesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class GlobalAddressesClient(metaclass=GlobalAddressesClientMeta): + """The GlobalAddresses API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalAddressesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalAddressesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GlobalAddressesTransport: + """Returns the transport used by the client instance. + + Returns: + GlobalAddressesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GlobalAddressesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the global addresses client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GlobalAddressesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GlobalAddressesTransport): + # transport is a GlobalAddressesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteGlobalAddressRequest, dict] = None, + *, + project: str = None, + address: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified address resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteGlobalAddressRequest, dict]): + The request object. A request message for + GlobalAddresses.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + address (str): + Name of the address resource to + delete. + + This corresponds to the ``address`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, address]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteGlobalAddressRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteGlobalAddressRequest): + request = compute.DeleteGlobalAddressRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if address is not None: + request.address = address + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetGlobalAddressRequest, dict] = None, + *, + project: str = None, + address: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Address: + r"""Returns the specified address resource. Gets a list + of available addresses by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetGlobalAddressRequest, dict]): + The request object. A request message for + GlobalAddresses.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + address (str): + Name of the address resource to + return. + + This corresponds to the ``address`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Address: + Represents an IP Address resource. 
Google Compute Engine + has two IP Address resources: \* [Global (external and + internal)](\ https://cloud.google.com/compute/docs/reference/rest/v1/globalAddresses) + \* [Regional (external and + internal)](\ https://cloud.google.com/compute/docs/reference/rest/v1/addresses) + For more information, see Reserving a static external IP + address. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, address]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetGlobalAddressRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetGlobalAddressRequest): + request = compute.GetGlobalAddressRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if address is not None: + request.address = address + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertGlobalAddressRequest, dict] = None, + *, + project: str = None, + address_resource: compute.Address = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates an address resource in the specified project + by using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertGlobalAddressRequest, dict]): + The request object. A request message for + GlobalAddresses.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + address_resource (google.cloud.compute_v1.types.Address): + The body resource for this request + This corresponds to the ``address_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. 
For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, address_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertGlobalAddressRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertGlobalAddressRequest): + request = compute.InsertGlobalAddressRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if address_resource is not None: + request.address_resource = address_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListGlobalAddressesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of global addresses. + + Args: + request (Union[google.cloud.compute_v1.types.ListGlobalAddressesRequest, dict]): + The request object. A request message for + GlobalAddresses.List. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_addresses.pagers.ListPager: + Contains a list of addresses. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListGlobalAddressesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListGlobalAddressesRequest): + request = compute.ListGlobalAddressesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "GlobalAddressesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/pagers.py new file mode 100644 index 000000000..fe95413e0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.AddressList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.AddressList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.AddressList], + request: compute.ListGlobalAddressesRequest, + response: compute.AddressList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListGlobalAddressesRequest): + The initial request object. + response (google.cloud.compute_v1.types.AddressList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListGlobalAddressesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.AddressList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Address]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/transports/__init__.py new file mode 100644 index 000000000..630574422 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_addresses/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GlobalAddressesTransport +from .rest import GlobalAddressesRestTransport + + +# Compile a registry of transports. 
# Maps transport names to their classes; REST is the only transport
# generated for the Compute API surface.
_transport_registry = OrderedDict() # type: Dict[str, Type[GlobalAddressesTransport]]
_transport_registry['rest'] = GlobalAddressesRestTransport

__all__ = (
    'GlobalAddressesTransport',
    'GlobalAddressesRestTransport',
)

# --- google/cloud/compute_v1/services/global_addresses/transports/base.py ---

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore

from google.cloud.compute_v1.types import compute

try:
    # Report the installed library version in the user-agent header.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Not pip-installed (e.g. running from a source tree): omit the version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class GlobalAddressesTransport(abc.ABC):
    """Abstract transport class for GlobalAddresses."""

    # OAuth scopes requested by default when none are supplied.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # default_scopes lets google-auth fall back to AUTH_SCOPES when the
        # caller passed no explicit scopes.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            # Application Default Credentials.
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # (hasattr guards against older google-auth releases without the feature.)
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC stub is wrapped once with
        # retry/timeout handling and the user-agent metadata from client_info.
        self._wrapped_methods = {
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # The four RPC stubs below are implemented by each concrete transport
    # (synchronous transports return the message; async ones an Awaitable).
    @property
    def delete(self) -> Callable[
            [compute.DeleteGlobalAddressRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetGlobalAddressRequest],
            Union[
                compute.Address,
                Awaitable[compute.Address]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertGlobalAddressRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListGlobalAddressesRequest],
            Union[
                compute.AddressList,
                Awaitable[compute.AddressList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'GlobalAddressesTransport',
)

# --- google/cloud/compute_v1/services/global_addresses/transports/rest.py ---

from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

try:
    # Recent google-api-core exposes a sentinel type for "retry unspecified";
    # fall back to a plain object alias on older releases.
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


from google.cloud.compute_v1.types import compute

from .base import GlobalAddressesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


# Advertise the REST (requests) stack, not gRPC, in the user-agent header.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)

class GlobalAddressesRestTransport(GlobalAddressesTransport):
    """REST backend transport for GlobalAddresses.

    The GlobalAddresses API.

    Implements the same method surface as the primary client so the
    client can load this transport and call through it.  Messages are
    exchanged as JSON-encoded protocol buffers over HTTP/1.1.
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials=None,
            credentials_file: str=None,
            scopes: Sequence[str]=None,
            client_cert_source_for_mtls: Callable[[
                ], Tuple[bytes, bytes]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the REST transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests; when omitted
                they are ascertained from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument
                is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate used to configure a mutual-TLS HTTP
                channel. Ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but "http" can be specified for testing or local
                servers.
        """
        # Run the base constructor.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also*
        # be set on the credentials object.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _prepare_rest_call(self, request_cls, request, http_options, required_fields):
        """Transcode ``request`` according to ``http_options``.

        Shared by every RPC below.  Returns the transcoded request dict
        (carrying ``uri``, ``method`` and, when configured, ``body``)
        together with the JSON-ready query-parameter dict.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Serialize query params through the proto JSON codec so field
        # names come out in the form the API expects.
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # to_json drops fields holding their default value; required fields
        # must still reach the server, so restore any that were lost.
        orig_query_params = transcoded_request['query_params']
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params and camel_case_name not in query_params:
                query_params[camel_case_name] = orig_query_params[snake_case_name]

        return transcoded_request, query_params

    def _delete(self,
            request: compute.DeleteGlobalAddressRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteGlobalAddressRequest):
                The request object. A request message for
                GlobalAddresses.Delete.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                The Operation resource tracking this asynchronous request.
        """
        http_options = [
            {
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/global/addresses/{address}',
            },
        ]
        required_fields = [
            # (snake_case_name, camel_case_name)
            ("address", "address"),
            ("project", "project"),
        ]
        transcoded_request, query_params = self._prepare_rest_call(
            compute.DeleteGlobalAddressRequest, request,
            http_options, required_fields)

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        # TODO: replace with proper scheme (http/https) configuration logic.
        response = getattr(self._session, transcoded_request['method'])(
            "https://{host}{uri}".format(
                host=self._host, uri=transcoded_request['uri']),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # Translate HTTP errors into the appropriate
        # core_exceptions.GoogleAPICallError subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _get(self,
            request: compute.GetGlobalAddressRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Address:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetGlobalAddressRequest):
                The request object. A request message for
                GlobalAddresses.Get.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Address:
                The requested IP Address resource.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/addresses/{address}',
            },
        ]
        required_fields = [
            # (snake_case_name, camel_case_name)
            ("address", "address"),
            ("project", "project"),
        ]
        transcoded_request, query_params = self._prepare_rest_call(
            compute.GetGlobalAddressRequest, request,
            http_options, required_fields)

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, transcoded_request['method'])(
            "https://{host}{uri}".format(
                host=self._host, uri=transcoded_request['uri']),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # Translate HTTP errors into the appropriate
        # core_exceptions.GoogleAPICallError subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return compute.Address.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _insert(self,
            request: compute.InsertGlobalAddressRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertGlobalAddressRequest):
                The request object. A request message for
                GlobalAddresses.Insert.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                The Operation resource tracking this asynchronous request.
        """
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/addresses',
                'body': 'address_resource',
            },
        ]
        required_fields = [
            # (snake_case_name, camel_case_name)
            ("project", "project"),
        ]
        transcoded_request, query_params = self._prepare_rest_call(
            compute.InsertGlobalAddressRequest, request,
            http_options, required_fields)

        # Serialize the request body (the Address resource).
        body = compute.Address.to_json(
            compute.Address(transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        )

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, transcoded_request['method'])(
            "https://{host}{uri}".format(
                host=self._host, uri=transcoded_request['uri']),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # Translate HTTP errors into the appropriate
        # core_exceptions.GoogleAPICallError subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _list(self,
            request: compute.ListGlobalAddressesRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.AddressList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListGlobalAddressesRequest):
                The request object. A request message for
                GlobalAddresses.List.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.AddressList:
                One page of addresses.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/addresses',
            },
        ]
        required_fields = [
            # (snake_case_name, camel_case_name)
            ("project", "project"),
        ]
        transcoded_request, query_params = self._prepare_rest_call(
            compute.ListGlobalAddressesRequest, request,
            http_options, required_fields)

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, transcoded_request['method'])(
            "https://{host}{uri}".format(
                host=self._host, uri=transcoded_request['uri']),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.AddressList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteGlobalAddressRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetGlobalAddressRequest], + compute.Address]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertGlobalAddressRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListGlobalAddressesRequest], + compute.AddressList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'GlobalAddressesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/__init__.py new file mode 100644 index 000000000..f2a326dc0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import GlobalForwardingRulesClient + +__all__ = ( + 'GlobalForwardingRulesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/client.py new file mode 100644 index 000000000..594f58890 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/client.py @@ -0,0 +1,998 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.global_forwarding_rules import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import GlobalForwardingRulesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import GlobalForwardingRulesRestTransport + + +class GlobalForwardingRulesClientMeta(type): + """Metaclass for the GlobalForwardingRules client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[GlobalForwardingRulesTransport]] + _transport_registry["rest"] = GlobalForwardingRulesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[GlobalForwardingRulesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class GlobalForwardingRulesClient(metaclass=GlobalForwardingRulesClientMeta): + """The GlobalForwardingRules API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalForwardingRulesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalForwardingRulesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GlobalForwardingRulesTransport: + """Returns the transport used by the client instance. + + Returns: + GlobalForwardingRulesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GlobalForwardingRulesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the global forwarding rules client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GlobalForwardingRulesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GlobalForwardingRulesTransport): + # transport is a GlobalForwardingRulesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteGlobalForwardingRuleRequest, dict] = None, + *, + project: str = None, + forwarding_rule: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified GlobalForwardingRule resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteGlobalForwardingRuleRequest, dict]): + The request object. A request message for + GlobalForwardingRules.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule (str): + Name of the ForwardingRule resource + to delete. 
+ + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, forwarding_rule]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteGlobalForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteGlobalForwardingRuleRequest): + request = compute.DeleteGlobalForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetGlobalForwardingRuleRequest, dict] = None, + *, + project: str = None, + forwarding_rule: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.ForwardingRule: + r"""Returns the specified GlobalForwardingRule resource. + Gets a list of available forwarding rules by making a + list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetGlobalForwardingRuleRequest, dict]): + The request object. A request message for + GlobalForwardingRules.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule (str): + Name of the ForwardingRule resource + to return. + + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.ForwardingRule: + Represents a Forwarding Rule resource. Forwarding rule + resources in Google Cloud can be either regional or + global in scope: \* + [Global](https://cloud.google.com/compute/docs/reference/rest/v1/globalForwardingRules) + \* + [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules) + A forwarding rule and its corresponding IP address + represent the frontend configuration of a Google Cloud + Platform load balancer. Forwarding rules can also + reference target instances and Cloud VPN Classic + gateways (targetVpnGateway). For more information, read + Forwarding rule concepts and Using protocol forwarding. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, forwarding_rule]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetGlobalForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetGlobalForwardingRuleRequest): + request = compute.GetGlobalForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertGlobalForwardingRuleRequest, dict] = None, + *, + project: str = None, + forwarding_rule_resource: compute.ForwardingRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a GlobalForwardingRule resource in the + specified project using the data included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertGlobalForwardingRuleRequest, dict]): + The request object. A request message for + GlobalForwardingRules.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + This corresponds to the ``forwarding_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, forwarding_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertGlobalForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertGlobalForwardingRuleRequest): + request = compute.InsertGlobalForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if forwarding_rule_resource is not None: + request.forwarding_rule_resource = forwarding_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListGlobalForwardingRulesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of GlobalForwardingRule resources + available to the specified project. 
+ + Args: + request (Union[google.cloud.compute_v1.types.ListGlobalForwardingRulesRequest, dict]): + The request object. A request message for + GlobalForwardingRules.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_forwarding_rules.pagers.ListPager: + Contains a list of ForwardingRule + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListGlobalForwardingRulesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListGlobalForwardingRulesRequest): + request = compute.ListGlobalForwardingRulesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchGlobalForwardingRuleRequest, dict] = None, + *, + project: str = None, + forwarding_rule: str = None, + forwarding_rule_resource: compute.ForwardingRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified forwarding rule with the data included in + the request. This method supports PATCH semantics and uses the + JSON merge patch format and processing rules. Currently, you can + only patch the network_tier field. + + Args: + request (Union[google.cloud.compute_v1.types.PatchGlobalForwardingRuleRequest, dict]): + The request object. A request message for + GlobalForwardingRules.Patch. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule (str): + Name of the ForwardingRule resource + to patch. + + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + This corresponds to the ``forwarding_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, forwarding_rule, forwarding_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchGlobalForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchGlobalForwardingRuleRequest): + request = compute.PatchGlobalForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + if forwarding_rule_resource is not None: + request.forwarding_rule_resource = forwarding_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsGlobalForwardingRuleRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_labels_request_resource: compute.GlobalSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on the specified resource. To learn + more about labels, read the Labeling resources + documentation. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsGlobalForwardingRuleRequest, dict]): + The request object. A request message for + GlobalForwardingRules.SetLabels. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + This corresponds to the ``global_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, global_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsGlobalForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetLabelsGlobalForwardingRuleRequest): + request = compute.SetLabelsGlobalForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_labels_request_resource is not None: + request.global_set_labels_request_resource = global_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_target(self, + request: Union[compute.SetTargetGlobalForwardingRuleRequest, dict] = None, + *, + project: str = None, + forwarding_rule: str = None, + target_reference_resource: compute.TargetReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes target URL for the GlobalForwardingRule + resource. The new target should be of the same type as + the old target. + + Args: + request (Union[google.cloud.compute_v1.types.SetTargetGlobalForwardingRuleRequest, dict]): + The request object. A request message for + GlobalForwardingRules.SetTarget. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + forwarding_rule (str): + Name of the ForwardingRule resource + in which target is to be set. + + This corresponds to the ``forwarding_rule`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_reference_resource (google.cloud.compute_v1.types.TargetReference): + The body resource for this request + This corresponds to the ``target_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, forwarding_rule, target_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetTargetGlobalForwardingRuleRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetTargetGlobalForwardingRuleRequest): + request = compute.SetTargetGlobalForwardingRuleRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if forwarding_rule is not None: + request.forwarding_rule = forwarding_rule + if target_reference_resource is not None: + request.target_reference_resource = target_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_target] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "GlobalForwardingRulesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/pagers.py new file mode 100644 index 000000000..c993702ad --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ForwardingRuleList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ForwardingRuleList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ForwardingRuleList], + request: compute.ListGlobalForwardingRulesRequest, + response: compute.ForwardingRuleList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListGlobalForwardingRulesRequest): + The initial request object. + response (google.cloud.compute_v1.types.ForwardingRuleList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListGlobalForwardingRulesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ForwardingRuleList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.ForwardingRule]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/__init__.py new file mode 100644 index 000000000..59c7ae52c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GlobalForwardingRulesTransport +from .rest import GlobalForwardingRulesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[GlobalForwardingRulesTransport]] +_transport_registry['rest'] = GlobalForwardingRulesRestTransport + +__all__ = ( + 'GlobalForwardingRulesTransport', + 'GlobalForwardingRulesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/base.py new file mode 100644 index 000000000..30594c3b8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/base.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class GlobalForwardingRulesTransport(abc.ABC): + """Abstract transport class for GlobalForwardingRules.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, + default_timeout=None, + client_info=client_info, + ), + self.set_target: gapic_v1.method.wrap_method( + self.set_target, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteGlobalForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetGlobalForwardingRuleRequest], + Union[ + compute.ForwardingRule, + Awaitable[compute.ForwardingRule] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertGlobalForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListGlobalForwardingRulesRequest], + Union[ + compute.ForwardingRuleList, + Awaitable[compute.ForwardingRuleList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchGlobalForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [compute.SetLabelsGlobalForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_target(self) -> Callable[ + [compute.SetTargetGlobalForwardingRuleRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'GlobalForwardingRulesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/rest.py new file mode 100644 index 000000000..23aba4be8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_forwarding_rules/transports/rest.py @@ -0,0 +1,901 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from 
google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import GlobalForwardingRulesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class GlobalForwardingRulesRestTransport(GlobalForwardingRulesTransport): + """REST backend transport for GlobalForwardingRules. + + The GlobalForwardingRules API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteGlobalForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteGlobalForwardingRuleRequest): + The request object. A request message for + GlobalForwardingRules.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteGlobalForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteGlobalForwardingRuleRequest.to_json( + compute.DeleteGlobalForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.Operation.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _get(self,
+            request: compute.GetGlobalForwardingRuleRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.ForwardingRule:
+        r"""Call the get method over HTTP.
+
+        Args:
+            request (~.compute.GetGlobalForwardingRuleRequest):
+                The request object. A request message for
+                GlobalForwardingRules.Get. See the
+                method description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.ForwardingRule:
+                Represents a Forwarding Rule resource. Forwarding rule
+                resources in Google Cloud can be either regional or
+                global in scope: \*
+                `Global </compute/docs/reference/rest/v1/globalForwardingRules>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/forwardingRules>`__
+                A forwarding rule and its corresponding IP address
+                represent the frontend configuration of a Google Cloud
+                Platform load balancer. Forwarding rules can also
+                reference target instances and Cloud VPN Classic
+                gateways (targetVpnGateway). For more information, read
+                Forwarding rule concepts and Using protocol forwarding.
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetGlobalForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetGlobalForwardingRuleRequest.to_json( + compute.GetGlobalForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ForwardingRule.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertGlobalForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertGlobalForwardingRuleRequest): + The request object. A request message for + GlobalForwardingRules.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/forwardingRules', + 'body': 'forwarding_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertGlobalForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ForwardingRule.to_json( + compute.ForwardingRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertGlobalForwardingRuleRequest.to_json( + compute.InsertGlobalForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _list(self,
            request: compute.ListGlobalForwardingRulesRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.ForwardingRuleList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListGlobalForwardingRulesRequest):
                The request object. A request message for
                GlobalForwardingRules.List. See the
                method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.ForwardingRuleList:
                Contains a list of ForwardingRule
                resources.

        """

        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/forwardingRules',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "project",
                "project"
            ),
        ]

        request_kwargs = compute.ListGlobalForwardingRulesRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.ListGlobalForwardingRulesRequest.to_json(
            compute.ListGlobalForwardingRulesRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.ForwardingRuleList.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _patch(self,
            request: compute.PatchGlobalForwardingRuleRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the patch method over HTTP.

        Args:
            request (~.compute.PatchGlobalForwardingRuleRequest):
                The request object. A request message for
                GlobalForwardingRules.Patch. See the
                method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global <https://cloud.google.com/compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional <https://cloud.google.com/compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal <https://cloud.google.com/compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}', + 'body': 'forwarding_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.PatchGlobalForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ForwardingRule.to_json( + compute.ForwardingRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchGlobalForwardingRuleRequest.to_json( + compute.PatchGlobalForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsGlobalForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsGlobalForwardingRuleRequest): + The request object. A request message for + GlobalForwardingRules.SetLabels. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/forwardingRules/{resource}/setLabels', + 'body': 'global_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetLabelsGlobalForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetLabelsRequest.to_json( + compute.GlobalSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsGlobalForwardingRuleRequest.to_json( + compute.SetLabelsGlobalForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_target(self, + request: compute.SetTargetGlobalForwardingRuleRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set target method over HTTP. + + Args: + request (~.compute.SetTargetGlobalForwardingRuleRequest): + The request object. A request message for + GlobalForwardingRules.SetTarget. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}/setTarget', + 'body': 'target_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "forwarding_rule", + "forwardingRule" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.SetTargetGlobalForwardingRuleRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetReference.to_json( + compute.TargetReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetTargetGlobalForwardingRuleRequest.to_json( + compute.SetTargetGlobalForwardingRuleRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteGlobalForwardingRuleRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetGlobalForwardingRuleRequest], + compute.ForwardingRule]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertGlobalForwardingRuleRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListGlobalForwardingRulesRequest], + compute.ForwardingRuleList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchGlobalForwardingRuleRequest], + compute.Operation]: + return self._patch + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsGlobalForwardingRuleRequest], + compute.Operation]: + return self._set_labels + @ property + def set_target(self) -> Callable[ + [compute.SetTargetGlobalForwardingRuleRequest], + compute.Operation]: + return self._set_target + def close(self): + self._session.close() + + +__all__=( + 
'GlobalForwardingRulesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/__init__.py new file mode 100644 index 000000000..620535bf5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import GlobalNetworkEndpointGroupsClient + +__all__ = ( + 'GlobalNetworkEndpointGroupsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/client.py new file mode 100644 index 000000000..18a7da00e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/client.py @@ -0,0 +1,989 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.global_network_endpoint_groups import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import GlobalNetworkEndpointGroupsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import GlobalNetworkEndpointGroupsRestTransport + + +class GlobalNetworkEndpointGroupsClientMeta(type): + """Metaclass for the GlobalNetworkEndpointGroups client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[GlobalNetworkEndpointGroupsTransport]] + _transport_registry["rest"] = GlobalNetworkEndpointGroupsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[GlobalNetworkEndpointGroupsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class GlobalNetworkEndpointGroupsClient(metaclass=GlobalNetworkEndpointGroupsClientMeta): + """The GlobalNetworkEndpointGroups API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalNetworkEndpointGroupsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalNetworkEndpointGroupsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GlobalNetworkEndpointGroupsTransport: + """Returns the transport used by the client instance. + + Returns: + GlobalNetworkEndpointGroupsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GlobalNetworkEndpointGroupsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the global network endpoint groups client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GlobalNetworkEndpointGroupsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GlobalNetworkEndpointGroupsTransport): + # transport is a GlobalNetworkEndpointGroupsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def attach_network_endpoints(self, + request: Union[compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + network_endpoint_group: str = None, + global_network_endpoint_groups_attach_endpoints_request_resource: compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Attach a network endpoint to the specified network + endpoint group. + + Args: + request (Union[google.cloud.compute_v1.types.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest, dict]): + The request object. A request message for + GlobalNetworkEndpointGroups.AttachNetworkEndpoints. See + the method description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group where you are attaching network + endpoints to. It should comply with + RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_network_endpoint_groups_attach_endpoints_request_resource (google.cloud.compute_v1.types.GlobalNetworkEndpointGroupsAttachEndpointsRequest): + The body resource for this request + This corresponds to the ``global_network_endpoint_groups_attach_endpoints_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network_endpoint_group, global_network_endpoint_groups_attach_endpoints_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest): + request = compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + if global_network_endpoint_groups_attach_endpoints_request_resource is not None: + request.global_network_endpoint_groups_attach_endpoints_request_resource = global_network_endpoint_groups_attach_endpoints_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.attach_network_endpoints] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteGlobalNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + network_endpoint_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified network endpoint group.Note + that the NEG cannot be deleted if there are backend + services referencing it. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteGlobalNetworkEndpointGroupRequest, dict]): + The request object. A request message for + GlobalNetworkEndpointGroups.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group to delete. It should comply with + RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network_endpoint_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteGlobalNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteGlobalNetworkEndpointGroupRequest): + request = compute.DeleteGlobalNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def detach_network_endpoints(self, + request: Union[compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + network_endpoint_group: str = None, + global_network_endpoint_groups_detach_endpoints_request_resource: compute.GlobalNetworkEndpointGroupsDetachEndpointsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Detach the network endpoint from the specified + network endpoint group. + + Args: + request (Union[google.cloud.compute_v1.types.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest, dict]): + The request object. A request message for + GlobalNetworkEndpointGroups.DetachNetworkEndpoints. See + the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group where you are removing network + endpoints. It should comply with + RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_network_endpoint_groups_detach_endpoints_request_resource (google.cloud.compute_v1.types.GlobalNetworkEndpointGroupsDetachEndpointsRequest): + The body resource for this request + This corresponds to the ``global_network_endpoint_groups_detach_endpoints_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network_endpoint_group, global_network_endpoint_groups_detach_endpoints_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest): + request = compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + if global_network_endpoint_groups_detach_endpoints_request_resource is not None: + request.global_network_endpoint_groups_detach_endpoints_request_resource = global_network_endpoint_groups_detach_endpoints_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.detach_network_endpoints] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetGlobalNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + network_endpoint_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NetworkEndpointGroup: + r"""Returns the specified network endpoint group. Gets a + list of available network endpoint groups by making a + list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetGlobalNetworkEndpointGroupRequest, dict]): + The request object. A request message for + GlobalNetworkEndpointGroups.Get. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group. It should comply with RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.NetworkEndpointGroup: + Represents a collection of network + endpoints. A network endpoint group + (NEG) defines how a set of endpoints + should be reached, whether they are + reachable, and where they are located. + For more information about using NEGs, + see Setting up external HTTP(S) Load + Balancing with internet NEGs, Setting up + zonal NEGs, or Setting up external + HTTP(S) Load Balancing with serverless + NEGs. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network_endpoint_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetGlobalNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetGlobalNetworkEndpointGroupRequest): + request = compute.GetGlobalNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertGlobalNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + network_endpoint_group_resource: compute.NetworkEndpointGroup = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a network endpoint group in the specified + project using the parameters that are included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertGlobalNetworkEndpointGroupRequest, dict]): + The request object. A request message for + GlobalNetworkEndpointGroups.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group_resource (google.cloud.compute_v1.types.NetworkEndpointGroup): + The body resource for this request + This corresponds to the ``network_endpoint_group_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network_endpoint_group_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertGlobalNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertGlobalNetworkEndpointGroupRequest): + request = compute.InsertGlobalNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network_endpoint_group_resource is not None: + request.network_endpoint_group_resource = network_endpoint_group_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list(self, + request: Union[compute.ListGlobalNetworkEndpointGroupsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of network endpoint groups that + are located in the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListGlobalNetworkEndpointGroupsRequest, dict]): + The request object. A request message for + GlobalNetworkEndpointGroups.List. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_network_endpoint_groups.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListGlobalNetworkEndpointGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.ListGlobalNetworkEndpointGroupsRequest): + request = compute.ListGlobalNetworkEndpointGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_network_endpoints(self, + request: Union[compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest, dict] = None, + *, + project: str = None, + network_endpoint_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNetworkEndpointsPager: + r"""Lists the network endpoints in the specified network + endpoint group. + + Args: + request (Union[google.cloud.compute_v1.types.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest, dict]): + The request object. A request message for + GlobalNetworkEndpointGroups.ListNetworkEndpoints. See + the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group from which you want to generate a + list of included network endpoints. It + should comply with RFC1035. 
+ + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_network_endpoint_groups.pagers.ListNetworkEndpointsPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network_endpoint_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest): + request = compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_network_endpoints] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNetworkEndpointsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "GlobalNetworkEndpointGroupsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/pagers.py new file mode 100644 index 000000000..b08f24f6a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/pagers.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NetworkEndpointGroupList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NetworkEndpointGroupList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NetworkEndpointGroupList], + request: compute.ListGlobalNetworkEndpointGroupsRequest, + response: compute.NetworkEndpointGroupList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListGlobalNetworkEndpointGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NetworkEndpointGroupList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListGlobalNetworkEndpointGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NetworkEndpointGroupList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NetworkEndpointGroup]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListNetworkEndpointsPager: + """A pager for iterating through ``list_network_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NetworkEndpointGroupsListNetworkEndpoints` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNetworkEndpoints`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NetworkEndpointGroupsListNetworkEndpoints` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NetworkEndpointGroupsListNetworkEndpoints], + request: compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest, + response: compute.NetworkEndpointGroupsListNetworkEndpoints, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.compute_v1.types.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NetworkEndpointGroupsListNetworkEndpoints): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NetworkEndpointGroupsListNetworkEndpoints]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NetworkEndpointWithHealthStatus]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/__init__.py new file mode 100644 index 000000000..16390d747 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GlobalNetworkEndpointGroupsTransport +from .rest import GlobalNetworkEndpointGroupsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[GlobalNetworkEndpointGroupsTransport]] +_transport_registry['rest'] = GlobalNetworkEndpointGroupsRestTransport + +__all__ = ( + 'GlobalNetworkEndpointGroupsTransport', + 'GlobalNetworkEndpointGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/base.py new file mode 100644 index 000000000..f31fa4c0b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/base.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class GlobalNetworkEndpointGroupsTransport(abc.ABC): + """Abstract transport class for GlobalNetworkEndpointGroups.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.attach_network_endpoints: gapic_v1.method.wrap_method( + self.attach_network_endpoints, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.detach_network_endpoints: gapic_v1.method.wrap_method( + self.detach_network_endpoints, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_network_endpoints: gapic_v1.method.wrap_method( + self.list_network_endpoints, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def attach_network_endpoints(self) -> Callable[ + [compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteGlobalNetworkEndpointGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def detach_network_endpoints(self) -> Callable[ + [compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetGlobalNetworkEndpointGroupRequest], + Union[ + compute.NetworkEndpointGroup, + Awaitable[compute.NetworkEndpointGroup] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertGlobalNetworkEndpointGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListGlobalNetworkEndpointGroupsRequest], + Union[ + compute.NetworkEndpointGroupList, + Awaitable[compute.NetworkEndpointGroupList] + ]]: + raise NotImplementedError() + + @property + def list_network_endpoints(self) -> Callable[ + [compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest], + Union[ + compute.NetworkEndpointGroupsListNetworkEndpoints, + Awaitable[compute.NetworkEndpointGroupsListNetworkEndpoints] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'GlobalNetworkEndpointGroupsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/rest.py new file mode 100644 index 000000000..a34a6ffa6 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_network_endpoint_groups/transports/rest.py @@ -0,0 +1,874 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import GlobalNetworkEndpointGroupsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class GlobalNetworkEndpointGroupsRestTransport(GlobalNetworkEndpointGroupsTransport): + """REST backend transport for GlobalNetworkEndpointGroups. + + The GlobalNetworkEndpointGroups API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. 
It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _attach_network_endpoints(self, + request: compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the attach network endpoints method over HTTP. + + Args: + request (~.compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest): + The request object. A request message for + GlobalNetworkEndpointGroups.AttachNetworkEndpoints. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.compute.Operation:
+ Represents an Operation resource. Google Compute Engine
+ has three Operation resources: \*
+ `Global </compute/docs/reference/rest/v1/globalOperations>`__
+ \*
+ `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+ \*
+ `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+ You can use an operation resource to manage asynchronous
+ API requests. For more information, read Handling API
+ responses. Operations can be global, regional or zonal.
+ - For global operations, use the ``globalOperations``
+ resource. - For regional operations, use the
+ ``regionOperations`` resource. - For zonal operations,
+ use the ``zonalOperations`` resource. For more
+ information, read Global, Regional, and Zonal Resources.
+
+ """
+
+ http_options = [
+ {
+ 'method': 'post',
+ 'uri': '/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}/attachNetworkEndpoints',
+ 'body': 'global_network_endpoint_groups_attach_endpoints_request_resource',
+ },
+ ]
+
+ required_fields = [
+ # (snake_case_name, camel_case_name)
+ (
+ "network_endpoint_group",
+ "networkEndpointGroup"
+ ),
+ (
+ "project",
+ "project"
+ ),
+ ]
+
+ request_kwargs = compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest.to_dict(request)
+ transcoded_request = path_template.transcode(
+ http_options, **request_kwargs)
+
+ # Jsonify the request body
+ body = compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest.to_json(
+ compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest(
+ transcoded_request['body']),
+ including_default_value_fields=False,
+ use_integers_for_enums=False
+ )
+ uri = transcoded_request['uri']
+ method = transcoded_request['method']
+
+ # Jsonify the query params
+ query_params = json.loads(compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest.to_json(
+ compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest(transcoded_request['query_params']),
+ including_default_value_fields=False,
+ use_integers_for_enums=False
+ ))
+
+ # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteGlobalNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteGlobalNetworkEndpointGroupRequest): + The request object. A request message for + GlobalNetworkEndpointGroups.Delete. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine
+ has three Operation resources: \*
+ `Global </compute/docs/reference/rest/v1/globalOperations>`__
+ \*
+ `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+ \*
+ `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+ You can use an operation resource to manage asynchronous
+ API requests. For more information, read Handling API
+ responses. Operations can be global, regional or zonal.
+ - For global operations, use the ``globalOperations``
+ resource. - For regional operations, use the
+ ``regionOperations`` resource. - For zonal operations,
+ use the ``zonalOperations`` resource. For more
+ information, read Global, Regional, and Zonal Resources.
+
+ """
+
+ http_options = [
+ {
+ 'method': 'delete',
+ 'uri': '/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}',
+ },
+ ]
+
+ required_fields = [
+ # (snake_case_name, camel_case_name)
+ (
+ "network_endpoint_group",
+ "networkEndpointGroup"
+ ),
+ (
+ "project",
+ "project"
+ ),
+ ]
+
+ request_kwargs = compute.DeleteGlobalNetworkEndpointGroupRequest.to_dict(request)
+ transcoded_request = path_template.transcode(
+ http_options, **request_kwargs)
+
+ uri = transcoded_request['uri']
+ method = transcoded_request['method']
+
+ # Jsonify the query params
+ query_params = json.loads(compute.DeleteGlobalNetworkEndpointGroupRequest.to_json(
+ compute.DeleteGlobalNetworkEndpointGroupRequest(transcoded_request['query_params']),
+ including_default_value_fields=False,
+ use_integers_for_enums=False
+ ))
+
+ # Ensure required fields have values in query_params.
+ # If a required field has a default value, it can get lost
+ # by the to_json call above.
+ orig_query_params = transcoded_request["query_params"]
+ for snake_case_name, camel_case_name in required_fields:
+ if snake_case_name in orig_query_params:
+ if camel_case_name not in query_params:
+ query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+ # Send the request
+ headers = dict(metadata)
+ headers['Content-Type'] = 'application/json'
+ response=getattr(self._session, method)(
+ # Replace with proper schema configuration (http/https) logic
+ "https://{host}{uri}".format(host=self._host, uri=uri),
+ timeout=timeout,
+ headers=headers,
+ params=rest_helpers.flatten_query_params(query_params),
+ )
+
+ # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+ # subclass.
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ return compute.Operation.from_json(
+ response.content,
+ ignore_unknown_fields=True
+ )
+
+ def _detach_network_endpoints(self,
+ request: compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest, *,
+ retry: OptionalRetry=gapic_v1.method.DEFAULT,
+ timeout: float=None,
+ metadata: Sequence[Tuple[str, str]]=(),
+ ) -> compute.Operation:
+ r"""Call the detach network endpoints method over HTTP.
+
+ Args:
+ request (~.compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest):
+ The request object. A request message for
+ GlobalNetworkEndpointGroups.DetachNetworkEndpoints.
+ See the method description for details.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.compute.Operation:
+ Represents an Operation resource. Google Compute Engine
+ has three Operation resources: \*
+ `Global </compute/docs/reference/rest/v1/globalOperations>`__
+ \*
+ `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+ \*
+ `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+ You can use an operation resource to manage asynchronous
+ API requests.
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}/detachNetworkEndpoints', + 'body': 'global_network_endpoint_groups_detach_endpoints_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalNetworkEndpointGroupsDetachEndpointsRequest.to_json( + compute.GlobalNetworkEndpointGroupsDetachEndpointsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest.to_json( + compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetGlobalNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroup: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetGlobalNetworkEndpointGroupRequest): + The request object. A request message for + GlobalNetworkEndpointGroups.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkEndpointGroup: + Represents a collection of network + endpoints. A network endpoint group + (NEG) defines how a set of endpoints + should be reached, whether they are + reachable, and where they are located. 
+ For more information about using NEGs, + see Setting up external HTTP(S) Load + Balancing with internet NEGs, Setting up + zonal NEGs, or Setting up external + HTTP(S) Load Balancing with serverless + NEGs. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetGlobalNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetGlobalNetworkEndpointGroupRequest.to_json( + compute.GetGlobalNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400:
+ raise core_exceptions.from_http_response(response)
+
+ # Return the response
+ return compute.NetworkEndpointGroup.from_json(
+ response.content,
+ ignore_unknown_fields=True
+ )
+
+ def _insert(self,
+ request: compute.InsertGlobalNetworkEndpointGroupRequest, *,
+ retry: OptionalRetry=gapic_v1.method.DEFAULT,
+ timeout: float=None,
+ metadata: Sequence[Tuple[str, str]]=(),
+ ) -> compute.Operation:
+ r"""Call the insert method over HTTP.
+
+ Args:
+ request (~.compute.InsertGlobalNetworkEndpointGroupRequest):
+ The request object. A request message for
+ GlobalNetworkEndpointGroups.Insert. See
+ the method description for details.
+
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ ~.compute.Operation:
+ Represents an Operation resource. Google Compute Engine
+ has three Operation resources: \*
+ `Global </compute/docs/reference/rest/v1/globalOperations>`__
+ \*
+ `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+ \*
+ `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+ You can use an operation resource to manage asynchronous
+ API requests. For more information, read Handling API
+ responses. Operations can be global, regional or zonal.
+ - For global operations, use the ``globalOperations``
+ resource. - For regional operations, use the
+ ``regionOperations`` resource. - For zonal operations,
+ use the ``zonalOperations`` resource. For more
+ information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/networkEndpointGroups', + 'body': 'network_endpoint_group_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertGlobalNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworkEndpointGroup.to_json( + compute.NetworkEndpointGroup( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertGlobalNetworkEndpointGroupRequest.to_json( + compute.InsertGlobalNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListGlobalNetworkEndpointGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroupList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListGlobalNetworkEndpointGroupsRequest): + The request object. A request message for + GlobalNetworkEndpointGroups.List. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkEndpointGroupList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/networkEndpointGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListGlobalNetworkEndpointGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListGlobalNetworkEndpointGroupsRequest.to_json( + compute.ListGlobalNetworkEndpointGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroupList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_network_endpoints(self, + request: compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroupsListNetworkEndpoints: + r"""Call the list network endpoints method over HTTP. + + Args: + request (~.compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest): + The request object. A request message for + GlobalNetworkEndpointGroups.ListNetworkEndpoints. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.NetworkEndpointGroupsListNetworkEndpoints: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}/listNetworkEndpoints', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest.to_json( + compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroupsListNetworkEndpoints.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def attach_network_endpoints(self) -> Callable[ + [compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest], + compute.Operation]: + return self._attach_network_endpoints + @ property + def delete(self) -> Callable[ + [compute.DeleteGlobalNetworkEndpointGroupRequest], + compute.Operation]: + return self._delete + @ property + def detach_network_endpoints(self) -> Callable[ + [compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest], + compute.Operation]: + return self._detach_network_endpoints + @ property + def get(self) -> Callable[ + [compute.GetGlobalNetworkEndpointGroupRequest], + compute.NetworkEndpointGroup]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertGlobalNetworkEndpointGroupRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListGlobalNetworkEndpointGroupsRequest], + compute.NetworkEndpointGroupList]: + return self._list + @ property + def list_network_endpoints(self) -> Callable[ + [compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest], + compute.NetworkEndpointGroupsListNetworkEndpoints]: + return self._list_network_endpoints + def close(self): + self._session.close() + + +__all__=( + 'GlobalNetworkEndpointGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/__init__.py new file mode 100644 index 000000000..421a2bfea --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the 
"License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import GlobalOperationsClient + +__all__ = ( + 'GlobalOperationsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/client.py new file mode 100644 index 000000000..0a6408cb8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/client.py @@ -0,0 +1,777 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.global_operations import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import GlobalOperationsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import GlobalOperationsRestTransport + + +class GlobalOperationsClientMeta(type): + """Metaclass for the GlobalOperations client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[GlobalOperationsTransport]] + _transport_registry["rest"] = GlobalOperationsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[GlobalOperationsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class GlobalOperationsClient(metaclass=GlobalOperationsClientMeta):
+    """The GlobalOperations API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            GlobalOperationsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalOperationsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GlobalOperationsTransport: + """Returns the transport used by the client instance. + + Returns: + GlobalOperationsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GlobalOperationsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the global operations client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GlobalOperationsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GlobalOperationsTransport): + # transport is a GlobalOperationsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListGlobalOperationsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of all operations. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListGlobalOperationsRequest, dict]): + The request object. A request message for + GlobalOperations.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_operations.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListGlobalOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListGlobalOperationsRequest): + request = compute.AggregatedListGlobalOperationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteGlobalOperationRequest, dict] = None, + *, + project: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.DeleteGlobalOperationResponse: + r"""Deletes the specified Operations resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteGlobalOperationRequest, dict]): + The request object. A request message for + GlobalOperations.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + delete. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.DeleteGlobalOperationResponse: + A response message for + GlobalOperations.Delete. See the method + description for details. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteGlobalOperationRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteGlobalOperationRequest): + request = compute.DeleteGlobalOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetGlobalOperationRequest, dict] = None, + *, + project: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Retrieves the specified Operations resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetGlobalOperationRequest, dict]): + The request object. A request message for + GlobalOperations.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + return. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetGlobalOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetGlobalOperationRequest): + request = compute.GetGlobalOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListGlobalOperationsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of Operation resources contained + within the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListGlobalOperationsRequest, dict]): + The request object. A request message for + GlobalOperations.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_operations.pagers.ListPager: + Contains a list of Operation + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListGlobalOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.ListGlobalOperationsRequest): + request = compute.ListGlobalOperationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def wait(self, + request: Union[compute.WaitGlobalOperationRequest, dict] = None, + *, + project: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Waits for the specified Operation resource to return as ``DONE`` + or for the request to approach the 2 minute deadline, and + retrieves the specified Operation resource. This method differs + from the ``GET`` method in that it waits for no more than the + default deadline (2 minutes) and then returns the current state + of the operation, which might be ``DONE`` or still in progress. + This method is called on a best-effort basis. Specifically: - In + uncommon cases, when the server is overloaded, the request might + return before the default deadline is reached, or might return + after zero seconds. - If the default deadline is reached, there + is no guarantee that the operation is actually done when the + method returns. Be prepared to retry if the operation is not + ``DONE``. 
+ + Args: + request (Union[google.cloud.compute_v1.types.WaitGlobalOperationRequest, dict]): + The request object. A request message for + GlobalOperations.Wait. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + return. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.WaitGlobalOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.WaitGlobalOperationRequest): + request = compute.WaitGlobalOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.wait] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "GlobalOperationsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/pagers.py new file mode 100644 index 000000000..acff8b1b5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.OperationAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.OperationAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.OperationAggregatedList], + request: compute.AggregatedListGlobalOperationsRequest, + response: compute.OperationAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListGlobalOperationsRequest): + The initial request object. + response (google.cloud.compute_v1.types.OperationAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListGlobalOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.OperationAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.OperationsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.OperationsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.OperationList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.OperationList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.OperationList], + request: compute.ListGlobalOperationsRequest, + response: compute.OperationList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListGlobalOperationsRequest): + The initial request object. + response (google.cloud.compute_v1.types.OperationList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListGlobalOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.OperationList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Operation]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/__init__.py new file mode 100644 index 000000000..2cd1d6cad --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GlobalOperationsTransport +from .rest import GlobalOperationsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[GlobalOperationsTransport]] +_transport_registry['rest'] = GlobalOperationsRestTransport + +__all__ = ( + 'GlobalOperationsTransport', + 'GlobalOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/base.py new file mode 100644 index 000000000..1b5c5ec58 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class GlobalOperationsTransport(abc.ABC): + """Abstract transport class for GlobalOperations.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.wait: gapic_v1.method.wrap_method( + self.wait, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListGlobalOperationsRequest], + Union[ + compute.OperationAggregatedList, + Awaitable[compute.OperationAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteGlobalOperationRequest], + Union[ + compute.DeleteGlobalOperationResponse, + Awaitable[compute.DeleteGlobalOperationResponse] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetGlobalOperationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListGlobalOperationsRequest], + Union[ + compute.OperationList, + Awaitable[compute.OperationList] + ]]: + raise NotImplementedError() + + @property + def wait(self) -> Callable[ + [compute.WaitGlobalOperationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'GlobalOperationsTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/rest.py new file mode 100644 index 000000000..f01a34b46 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_operations/transports/rest.py @@ -0,0 +1,623 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import GlobalOperationsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class GlobalOperationsRestTransport(GlobalOperationsTransport): + """REST backend transport for GlobalOperations. + + The GlobalOperations API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListGlobalOperationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.OperationAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListGlobalOperationsRequest): + The request object. A request message for + GlobalOperations.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.OperationAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/operations', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListGlobalOperationsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListGlobalOperationsRequest.to_json( + compute.AggregatedListGlobalOperationsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.OperationAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteGlobalOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DeleteGlobalOperationResponse: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteGlobalOperationRequest): + The request object. A request message for + GlobalOperations.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DeleteGlobalOperationResponse: + A response message for + GlobalOperations.Delete. See the method + description for details. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/operations/{operation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteGlobalOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteGlobalOperationRequest.to_json( + compute.DeleteGlobalOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        headers = dict(metadata)
+        headers['Content-Type'] = 'application/json'
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+            )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.DeleteGlobalOperationResponse.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _get(self,
+            request: compute.GetGlobalOperationRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the get method over HTTP.
+
+        Args:
+            request (~.compute.GetGlobalOperationRequest):
+                The request object. A request message for
+                GlobalOperations.Get. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/operations/{operation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetGlobalOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetGlobalOperationRequest.to_json( + compute.GetGlobalOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListGlobalOperationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.OperationList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListGlobalOperationsRequest): + The request object. A request message for + GlobalOperations.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.OperationList: + Contains a list of Operation + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/operations', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListGlobalOperationsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListGlobalOperationsRequest.to_json( + compute.ListGlobalOperationsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        headers = dict(metadata)
+        headers['Content-Type'] = 'application/json'
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+            )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.OperationList.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _wait(self,
+            request: compute.WaitGlobalOperationRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the wait method over HTTP.
+
+        Args:
+            request (~.compute.WaitGlobalOperationRequest):
+                The request object. A request message for
+                GlobalOperations.Wait. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/operations/{operation}/wait', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.WaitGlobalOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.WaitGlobalOperationRequest.to_json( + compute.WaitGlobalOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListGlobalOperationsRequest], + compute.OperationAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteGlobalOperationRequest], + compute.DeleteGlobalOperationResponse]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetGlobalOperationRequest], + compute.Operation]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListGlobalOperationsRequest], + compute.OperationList]: + return self._list + @ property + def wait(self) -> Callable[ + [compute.WaitGlobalOperationRequest], + compute.Operation]: + return self._wait + def close(self): + self._session.close() + + +__all__=( + 'GlobalOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/__init__.py new file mode 100644 index 000000000..a93f527cf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import GlobalOrganizationOperationsClient + +__all__ = ( + 'GlobalOrganizationOperationsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/client.py new file mode 100644 index 000000000..022e9c8c4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/client.py @@ -0,0 +1,568 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.global_organization_operations import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import GlobalOrganizationOperationsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import GlobalOrganizationOperationsRestTransport + + +class GlobalOrganizationOperationsClientMeta(type): + """Metaclass for the GlobalOrganizationOperations client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[GlobalOrganizationOperationsTransport]] + _transport_registry["rest"] = GlobalOrganizationOperationsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[GlobalOrganizationOperationsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. 
+ """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class GlobalOrganizationOperationsClient(metaclass=GlobalOrganizationOperationsClientMeta): + """The GlobalOrganizationOperations API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalOrganizationOperationsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalOrganizationOperationsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GlobalOrganizationOperationsTransport: + """Returns the transport used by the client instance. + + Returns: + GlobalOrganizationOperationsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GlobalOrganizationOperationsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the global organization operations client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GlobalOrganizationOperationsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GlobalOrganizationOperationsTransport): + # transport is a GlobalOrganizationOperationsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteGlobalOrganizationOperationRequest, dict] = None, + *, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.DeleteGlobalOrganizationOperationResponse: + r"""Deletes the specified Operations resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteGlobalOrganizationOperationRequest, dict]): + The request object. A request message for + GlobalOrganizationOperations.Delete. See the method + description for details. + operation (str): + Name of the Operations resource to + delete. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.DeleteGlobalOrganizationOperationResponse: + A response message for + GlobalOrganizationOperations.Delete. See + the method description for details. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteGlobalOrganizationOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteGlobalOrganizationOperationRequest): + request = compute.DeleteGlobalOrganizationOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetGlobalOrganizationOperationRequest, dict] = None, + *, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Retrieves the specified Operations resource. Gets a list of + operations by making a ``list()`` request. 
+ + Args: + request (Union[google.cloud.compute_v1.types.GetGlobalOrganizationOperationRequest, dict]): + The request object. A request message for + GlobalOrganizationOperations.Get. See the method + description for details. + operation (str): + Name of the Operations resource to + return. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetGlobalOrganizationOperationRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetGlobalOrganizationOperationRequest): + request = compute.GetGlobalOrganizationOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListGlobalOrganizationOperationsRequest, dict] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of Operation resources contained + within the specified organization. + + Args: + request (Union[google.cloud.compute_v1.types.ListGlobalOrganizationOperationsRequest, dict]): + The request object. A request message for + GlobalOrganizationOperations.List. See the method + description for details. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_organization_operations.pagers.ListPager: + Contains a list of Operation + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListGlobalOrganizationOperationsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListGlobalOrganizationOperationsRequest): + request = compute.ListGlobalOrganizationOperationsRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "GlobalOrganizationOperationsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/pagers.py new file mode 100644 index 000000000..1e2eb8350 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.OperationList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.OperationList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.OperationList], + request: compute.ListGlobalOrganizationOperationsRequest, + response: compute.OperationList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListGlobalOrganizationOperationsRequest): + The initial request object. + response (google.cloud.compute_v1.types.OperationList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListGlobalOrganizationOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.OperationList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Operation]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/__init__.py new file mode 100644 index 000000000..ac30f244c --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GlobalOrganizationOperationsTransport +from .rest import GlobalOrganizationOperationsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[GlobalOrganizationOperationsTransport]] +_transport_registry['rest'] = GlobalOrganizationOperationsRestTransport + +__all__ = ( + 'GlobalOrganizationOperationsTransport', + 'GlobalOrganizationOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/base.py new file mode 100644 index 000000000..b443d5a69 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/base.py @@ -0,0 +1,175 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class GlobalOrganizationOperationsTransport(abc.ABC): + """Abstract transport class for GlobalOrganizationOperations.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteGlobalOrganizationOperationRequest], + Union[ + compute.DeleteGlobalOrganizationOperationResponse, + Awaitable[compute.DeleteGlobalOrganizationOperationResponse] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetGlobalOrganizationOperationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListGlobalOrganizationOperationsRequest], + Union[ + compute.OperationList, + Awaitable[compute.OperationList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'GlobalOrganizationOperationsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/rest.py new file mode 100644 index 000000000..12f254f49 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_organization_operations/transports/rest.py @@ -0,0 +1,399 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import GlobalOrganizationOperationsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class GlobalOrganizationOperationsRestTransport(GlobalOrganizationOperationsTransport): + """REST backend transport for GlobalOrganizationOperations. + + The GlobalOrganizationOperations API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteGlobalOrganizationOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DeleteGlobalOrganizationOperationResponse: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteGlobalOrganizationOperationRequest): + The request object. A request message for + GlobalOrganizationOperations.Delete. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DeleteGlobalOrganizationOperationResponse: + A response message for + GlobalOrganizationOperations.Delete. See + the method description for details. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/locations/global/operations/{operation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ] + + request_kwargs = compute.DeleteGlobalOrganizationOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteGlobalOrganizationOperationRequest.to_json( + compute.DeleteGlobalOrganizationOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DeleteGlobalOrganizationOperationResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetGlobalOrganizationOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetGlobalOrganizationOperationRequest): + The request object. A request message for + GlobalOrganizationOperations.Get. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/operations/{operation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ] + + request_kwargs = compute.GetGlobalOrganizationOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetGlobalOrganizationOperationRequest.to_json( + compute.GetGlobalOrganizationOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListGlobalOrganizationOperationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.OperationList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListGlobalOrganizationOperationsRequest): + The request object. A request message for + GlobalOrganizationOperations.List. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.OperationList: + Contains a list of Operation + resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/locations/global/operations', + }, + ] + + request_kwargs = compute.ListGlobalOrganizationOperationsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListGlobalOrganizationOperationsRequest.to_json( + compute.ListGlobalOrganizationOperationsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.OperationList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteGlobalOrganizationOperationRequest], + compute.DeleteGlobalOrganizationOperationResponse]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetGlobalOrganizationOperationRequest], + compute.Operation]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListGlobalOrganizationOperationsRequest], + compute.OperationList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'GlobalOrganizationOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/__init__.py new file mode 100644 index 000000000..33243ea08 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import GlobalPublicDelegatedPrefixesClient + +__all__ = ( + 'GlobalPublicDelegatedPrefixesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/client.py new file mode 100644 index 000000000..d46f82d81 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/client.py @@ -0,0 +1,794 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.global_public_delegated_prefixes import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import GlobalPublicDelegatedPrefixesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import GlobalPublicDelegatedPrefixesRestTransport + + +class GlobalPublicDelegatedPrefixesClientMeta(type): + """Metaclass for the GlobalPublicDelegatedPrefixes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[GlobalPublicDelegatedPrefixesTransport]] + _transport_registry["rest"] = GlobalPublicDelegatedPrefixesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[GlobalPublicDelegatedPrefixesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. 
+ """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class GlobalPublicDelegatedPrefixesClient(metaclass=GlobalPublicDelegatedPrefixesClientMeta): + """The GlobalPublicDelegatedPrefixes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalPublicDelegatedPrefixesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + GlobalPublicDelegatedPrefixesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> GlobalPublicDelegatedPrefixesTransport: + """Returns the transport used by the client instance. + + Returns: + GlobalPublicDelegatedPrefixesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, GlobalPublicDelegatedPrefixesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the global public delegated prefixes client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, GlobalPublicDelegatedPrefixesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, GlobalPublicDelegatedPrefixesTransport): + # transport is a GlobalPublicDelegatedPrefixesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteGlobalPublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + public_delegated_prefix: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified global PublicDelegatedPrefix. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteGlobalPublicDelegatedPrefixeRequest, dict]): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix + resource to delete. 
+ + This corresponds to the ``public_delegated_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, public_delegated_prefix]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteGlobalPublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteGlobalPublicDelegatedPrefixeRequest): + request = compute.DeleteGlobalPublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if public_delegated_prefix is not None: + request.public_delegated_prefix = public_delegated_prefix + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetGlobalPublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + public_delegated_prefix: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.PublicDelegatedPrefix: + r"""Returns the specified global PublicDelegatedPrefix + resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetGlobalPublicDelegatedPrefixeRequest, dict]): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Get. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix + resource to return. + + This corresponds to the ``public_delegated_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.PublicDelegatedPrefix: + A PublicDelegatedPrefix resource + represents an IP block within a + PublicAdvertisedPrefix that is + configured within a single cloud scope + (global or region). IPs in the block can + be allocated to resources within that + scope. Public delegated prefixes may be + further broken up into smaller IP blocks + in the same scope as the parent block. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, public_delegated_prefix]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetGlobalPublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetGlobalPublicDelegatedPrefixeRequest): + request = compute.GetGlobalPublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if public_delegated_prefix is not None: + request.public_delegated_prefix = public_delegated_prefix + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertGlobalPublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + public_delegated_prefix_resource: compute.PublicDelegatedPrefix = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a global PublicDelegatedPrefix in the + specified project using the parameters that are included + in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertGlobalPublicDelegatedPrefixeRequest, dict]): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + This corresponds to the ``public_delegated_prefix_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, public_delegated_prefix_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertGlobalPublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertGlobalPublicDelegatedPrefixeRequest): + request = compute.InsertGlobalPublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if public_delegated_prefix_resource is not None: + request.public_delegated_prefix_resource = public_delegated_prefix_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list(self, + request: Union[compute.ListGlobalPublicDelegatedPrefixesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Lists the global PublicDelegatedPrefixes for a + project. + + Args: + request (Union[google.cloud.compute_v1.types.ListGlobalPublicDelegatedPrefixesRequest, dict]): + The request object. A request message for + GlobalPublicDelegatedPrefixes.List. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.global_public_delegated_prefixes.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListGlobalPublicDelegatedPrefixesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.ListGlobalPublicDelegatedPrefixesRequest): + request = compute.ListGlobalPublicDelegatedPrefixesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchGlobalPublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + public_delegated_prefix: str = None, + public_delegated_prefix_resource: compute.PublicDelegatedPrefix = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified global PublicDelegatedPrefix + resource with the data included in the request. This + method supports PATCH semantics and uses JSON merge + patch format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchGlobalPublicDelegatedPrefixeRequest, dict]): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Patch. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix + resource to patch. 
+ + This corresponds to the ``public_delegated_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + This corresponds to the ``public_delegated_prefix_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, public_delegated_prefix, public_delegated_prefix_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchGlobalPublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchGlobalPublicDelegatedPrefixeRequest): + request = compute.PatchGlobalPublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if public_delegated_prefix is not None: + request.public_delegated_prefix = public_delegated_prefix + if public_delegated_prefix_resource is not None: + request.public_delegated_prefix_resource = public_delegated_prefix_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "GlobalPublicDelegatedPrefixesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/pagers.py new file mode 100644 index 000000000..22698839a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.PublicDelegatedPrefixList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.PublicDelegatedPrefixList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.PublicDelegatedPrefixList], + request: compute.ListGlobalPublicDelegatedPrefixesRequest, + response: compute.PublicDelegatedPrefixList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListGlobalPublicDelegatedPrefixesRequest): + The initial request object. + response (google.cloud.compute_v1.types.PublicDelegatedPrefixList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListGlobalPublicDelegatedPrefixesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.PublicDelegatedPrefixList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.PublicDelegatedPrefix]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/__init__.py new file mode 100644 index 000000000..4e59f6fbb --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import GlobalPublicDelegatedPrefixesTransport +from .rest import GlobalPublicDelegatedPrefixesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[GlobalPublicDelegatedPrefixesTransport]] +_transport_registry['rest'] = GlobalPublicDelegatedPrefixesRestTransport + +__all__ = ( + 'GlobalPublicDelegatedPrefixesTransport', + 'GlobalPublicDelegatedPrefixesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/base.py new file mode 100644 index 000000000..b72a6dede --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class GlobalPublicDelegatedPrefixesTransport(abc.ABC): + """Abstract transport class for GlobalPublicDelegatedPrefixes.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteGlobalPublicDelegatedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetGlobalPublicDelegatedPrefixeRequest], + Union[ + compute.PublicDelegatedPrefix, + Awaitable[compute.PublicDelegatedPrefix] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertGlobalPublicDelegatedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListGlobalPublicDelegatedPrefixesRequest], + Union[ + compute.PublicDelegatedPrefixList, + Awaitable[compute.PublicDelegatedPrefixList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchGlobalPublicDelegatedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'GlobalPublicDelegatedPrefixesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/rest.py new file mode 100644 index 000000000..a9f476ac7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/global_public_delegated_prefixes/transports/rest.py @@ -0,0 +1,660 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import 
path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import GlobalPublicDelegatedPrefixesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class GlobalPublicDelegatedPrefixesRestTransport(GlobalPublicDelegatedPrefixesTransport): + """REST backend transport for GlobalPublicDelegatedPrefixes. + + The GlobalPublicDelegatedPrefixes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteGlobalPublicDelegatedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteGlobalPublicDelegatedPrefixeRequest): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Delete. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/publicDelegatedPrefixes/{public_delegated_prefix}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "public_delegated_prefix", + "publicDelegatedPrefix" + ), + ] + + request_kwargs = compute.DeleteGlobalPublicDelegatedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteGlobalPublicDelegatedPrefixeRequest.to_json( + compute.DeleteGlobalPublicDelegatedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetGlobalPublicDelegatedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PublicDelegatedPrefix: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetGlobalPublicDelegatedPrefixeRequest): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Get. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PublicDelegatedPrefix: + A PublicDelegatedPrefix resource + represents an IP block within a + PublicAdvertisedPrefix that is + configured within a single cloud scope + (global or region). IPs in the block can + be allocated to resources within that + scope. Public delegated prefixes may be + further broken up into smaller IP blocks + in the same scope as the parent block. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/publicDelegatedPrefixes/{public_delegated_prefix}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "public_delegated_prefix", + "publicDelegatedPrefix" + ), + ] + + request_kwargs = compute.GetGlobalPublicDelegatedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetGlobalPublicDelegatedPrefixeRequest.to_json( + compute.GetGlobalPublicDelegatedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.PublicDelegatedPrefix.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertGlobalPublicDelegatedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertGlobalPublicDelegatedPrefixeRequest): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Insert. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/publicDelegatedPrefixes', + 'body': 'public_delegated_prefix_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertGlobalPublicDelegatedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.PublicDelegatedPrefix.to_json( + compute.PublicDelegatedPrefix( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertGlobalPublicDelegatedPrefixeRequest.to_json( + compute.InsertGlobalPublicDelegatedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListGlobalPublicDelegatedPrefixesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PublicDelegatedPrefixList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListGlobalPublicDelegatedPrefixesRequest): + The request object. A request message for + GlobalPublicDelegatedPrefixes.List. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PublicDelegatedPrefixList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/publicDelegatedPrefixes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListGlobalPublicDelegatedPrefixesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListGlobalPublicDelegatedPrefixesRequest.to_json( + compute.ListGlobalPublicDelegatedPrefixesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.PublicDelegatedPrefixList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchGlobalPublicDelegatedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchGlobalPublicDelegatedPrefixeRequest): + The request object. A request message for + GlobalPublicDelegatedPrefixes.Patch. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/publicDelegatedPrefixes/{public_delegated_prefix}', + 'body': 'public_delegated_prefix_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "public_delegated_prefix", + "publicDelegatedPrefix" + ), + ] + + request_kwargs = compute.PatchGlobalPublicDelegatedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.PublicDelegatedPrefix.to_json( + compute.PublicDelegatedPrefix( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchGlobalPublicDelegatedPrefixeRequest.to_json( + compute.PatchGlobalPublicDelegatedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteGlobalPublicDelegatedPrefixeRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetGlobalPublicDelegatedPrefixeRequest], + compute.PublicDelegatedPrefix]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertGlobalPublicDelegatedPrefixeRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListGlobalPublicDelegatedPrefixesRequest], + compute.PublicDelegatedPrefixList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchGlobalPublicDelegatedPrefixeRequest], + compute.Operation]: + return self._patch + def close(self): + self._session.close() + + +__all__=( + 'GlobalPublicDelegatedPrefixesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/__init__.py new file mode 100644 index 
000000000..d9f194253 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import HealthChecksClient + +__all__ = ( + 'HealthChecksClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/client.py new file mode 100644 index 000000000..a2820f74e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/client.py @@ -0,0 +1,983 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.health_checks import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import HealthChecksTransport, DEFAULT_CLIENT_INFO +from .transports.rest import HealthChecksRestTransport + + +class HealthChecksClientMeta(type): + """Metaclass for the HealthChecks client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[HealthChecksTransport]] + _transport_registry["rest"] = HealthChecksRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[HealthChecksTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class HealthChecksClient(metaclass=HealthChecksClientMeta): + """The HealthChecks API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + HealthChecksClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + HealthChecksClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> HealthChecksTransport: + """Returns the transport used by the client instance. + + Returns: + HealthChecksTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, HealthChecksTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the health checks client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, HealthChecksTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, HealthChecksTransport): + # transport is a HealthChecksTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListHealthChecksRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of all HealthCheck resources, + regional and global, available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListHealthChecksRequest, dict]): + The request object. A request message for + HealthChecks.AggregatedList. See the method description + for details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.health_checks.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListHealthChecksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListHealthChecksRequest): + request = compute.AggregatedListHealthChecksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteHealthCheckRequest, dict] = None, + *, + project: str = None, + health_check: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified HealthCheck resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteHealthCheckRequest, dict]): + The request object. A request message for + HealthChecks.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ health_check (str): + Name of the HealthCheck resource to + delete. + + This corresponds to the ``health_check`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, health_check]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteHealthCheckRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteHealthCheckRequest): + request = compute.DeleteHealthCheckRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if health_check is not None: + request.health_check = health_check + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetHealthCheckRequest, dict] = None, + *, + project: str = None, + health_check: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.HealthCheck: + r"""Returns the specified HealthCheck resource. Gets a + list of available health checks by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetHealthCheckRequest, dict]): + The request object. A request message for + HealthChecks.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + health_check (str): + Name of the HealthCheck resource to + return. + + This corresponds to the ``health_check`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.HealthCheck: + Represents a Health Check resource. 
Google Compute + Engine has two Health Check resources: \* + [Global](/compute/docs/reference/rest/v1/healthChecks) + \* + [Regional](/compute/docs/reference/rest/v1/regionHealthChecks) + Internal HTTP(S) load balancers must use regional health + checks (compute.v1.regionHealthChecks). Traffic Director + must use global health checks (compute.v1.HealthChecks). + Internal TCP/UDP load balancers can use either regional + or global health checks (compute.v1.regionHealthChecks + or compute.v1.HealthChecks). External HTTP(S), TCP + proxy, and SSL proxy load balancers as well as managed + instance group auto-healing must use global health + checks (compute.v1.HealthChecks). Backend service-based + network load balancers must use regional health checks + (compute.v1.regionHealthChecks). Target pool-based + network load balancers must use legacy HTTP health + checks (compute.v1.httpHealthChecks). For more + information, see Health checks overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, health_check]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetHealthCheckRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetHealthCheckRequest): + request = compute.GetHealthCheckRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if health_check is not None: + request.health_check = health_check + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertHealthCheckRequest, dict] = None, + *, + project: str = None, + health_check_resource: compute.HealthCheck = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a HealthCheck resource in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertHealthCheckRequest, dict]): + The request object. A request message for + HealthChecks.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + This corresponds to the ``health_check_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, health_check_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertHealthCheckRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertHealthCheckRequest): + request = compute.InsertHealthCheckRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if health_check_resource is not None: + request.health_check_resource = health_check_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListHealthChecksRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of HealthCheck resources available + to the specified project. 
+ + Args: + request (Union[google.cloud.compute_v1.types.ListHealthChecksRequest, dict]): + The request object. A request message for + HealthChecks.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.health_checks.pagers.ListPager: + Contains a list of HealthCheck + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListHealthChecksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListHealthChecksRequest): + request = compute.ListHealthChecksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchHealthCheckRequest, dict] = None, + *, + project: str = None, + health_check: str = None, + health_check_resource: compute.HealthCheck = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates a HealthCheck resource in the specified + project using the data included in the request. This + method supports PATCH semantics and uses the JSON merge + patch format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchHealthCheckRequest, dict]): + The request object. A request message for + HealthChecks.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + health_check (str): + Name of the HealthCheck resource to + patch. + + This corresponds to the ``health_check`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + This corresponds to the ``health_check_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, health_check, health_check_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchHealthCheckRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchHealthCheckRequest): + request = compute.PatchHealthCheckRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if health_check is not None: + request.health_check = health_check + if health_check_resource is not None: + request.health_check_resource = health_check_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateHealthCheckRequest, dict] = None, + *, + project: str = None, + health_check: str = None, + health_check_resource: compute.HealthCheck = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates a HealthCheck resource in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateHealthCheckRequest, dict]): + The request object. A request message for + HealthChecks.Update. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + health_check (str): + Name of the HealthCheck resource to + update. + + This corresponds to the ``health_check`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + This corresponds to the ``health_check_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, health_check, health_check_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateHealthCheckRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateHealthCheckRequest): + request = compute.UpdateHealthCheckRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if health_check is not None: + request.health_check = health_check + if health_check_resource is not None: + request.health_check_resource = health_check_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "HealthChecksClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/pagers.py new file mode 100644 index 000000000..8fe6d5fdb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.HealthChecksAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.HealthChecksAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.HealthChecksAggregatedList], + request: compute.AggregatedListHealthChecksRequest, + response: compute.HealthChecksAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListHealthChecksRequest): + The initial request object. + response (google.cloud.compute_v1.types.HealthChecksAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.AggregatedListHealthChecksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.HealthChecksAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.HealthChecksScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.HealthChecksScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.HealthCheckList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.HealthCheckList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.HealthCheckList], + request: compute.ListHealthChecksRequest, + response: compute.HealthCheckList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListHealthChecksRequest): + The initial request object. 
+ response (google.cloud.compute_v1.types.HealthCheckList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListHealthChecksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.HealthCheckList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.HealthCheck]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/__init__.py new file mode 100644 index 000000000..45b0482d9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import HealthChecksTransport +from .rest import HealthChecksRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[HealthChecksTransport]] +_transport_registry['rest'] = HealthChecksRestTransport + +__all__ = ( + 'HealthChecksTransport', + 'HealthChecksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/base.py new file mode 100644 index 000000000..6a38a5b0c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/base.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class HealthChecksTransport(abc.ABC): + """Abstract transport class for HealthChecks.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListHealthChecksRequest], + Union[ + compute.HealthChecksAggregatedList, + Awaitable[compute.HealthChecksAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteHealthCheckRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetHealthCheckRequest], + Union[ + compute.HealthCheck, + Awaitable[compute.HealthCheck] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertHealthCheckRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListHealthChecksRequest], + Union[ + compute.HealthCheckList, + Awaitable[compute.HealthCheckList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchHealthCheckRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateHealthCheckRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'HealthChecksTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/rest.py new file mode 100644 index 000000000..4f7d235c4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/health_checks/transports/rest.py @@ -0,0 +1,881 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import 
credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import HealthChecksTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class HealthChecksRestTransport(HealthChecksTransport): + """REST backend transport for HealthChecks. + + The HealthChecks API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListHealthChecksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.HealthChecksAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListHealthChecksRequest): + The request object. A request message for + HealthChecks.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.HealthChecksAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/healthChecks', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListHealthChecksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListHealthChecksRequest.to_json( + compute.AggregatedListHealthChecksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.HealthChecksAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteHealthCheckRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteHealthCheckRequest): + The request object. A request message for + HealthChecks.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/healthChecks/{health_check}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "health_check", + "healthCheck" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteHealthCheckRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteHealthCheckRequest.to_json( + compute.DeleteHealthCheckRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetHealthCheckRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.HealthCheck: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetHealthCheckRequest): + The request object. A request message for + HealthChecks.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.HealthCheck: + Represents a Health Check resource. Google Compute + Engine has two Health Check resources: \* + `Global `__ + \* + `Regional `__ + Internal HTTP(S) load balancers must use regional health + checks (``compute.v1.regionHealthChecks``). Traffic + Director must use global health checks + (``compute.v1.HealthChecks``). Internal TCP/UDP load + balancers can use either regional or global health + checks (``compute.v1.regionHealthChecks`` or + ``compute.v1.HealthChecks``). External HTTP(S), TCP + proxy, and SSL proxy load balancers as well as managed + instance group auto-healing must use global health + checks (``compute.v1.HealthChecks``). Backend + service-based network load balancers must use regional + health checks (``compute.v1.regionHealthChecks``). + Target pool-based network load balancers must use legacy + HTTP health checks (``compute.v1.httpHealthChecks``). + For more information, see Health checks overview. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/healthChecks/{health_check}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "health_check", + "healthCheck" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetHealthCheckRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetHealthCheckRequest.to_json( + compute.GetHealthCheckRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.HealthCheck.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertHealthCheckRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertHealthCheckRequest): + The request object. A request message for + HealthChecks.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/healthChecks', + 'body': 'health_check_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertHealthCheckRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.HealthCheck.to_json( + compute.HealthCheck( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertHealthCheckRequest.to_json( + compute.InsertHealthCheckRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListHealthChecksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.HealthCheckList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListHealthChecksRequest): + The request object. A request message for + HealthChecks.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.HealthCheckList: + Contains a list of HealthCheck + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/healthChecks', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListHealthChecksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListHealthChecksRequest.to_json( + compute.ListHealthChecksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.HealthCheckList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchHealthCheckRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchHealthCheckRequest): + The request object. A request message for + HealthChecks.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/healthChecks/{health_check}', + 'body': 'health_check_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "health_check", + "healthCheck" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.PatchHealthCheckRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.HealthCheck.to_json( + compute.HealthCheck( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchHealthCheckRequest.to_json( + compute.PatchHealthCheckRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateHealthCheckRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateHealthCheckRequest): + The request object. A request message for + HealthChecks.Update. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/global/healthChecks/{health_check}', + 'body': 'health_check_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "health_check", + "healthCheck" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.UpdateHealthCheckRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.HealthCheck.to_json( + compute.HealthCheck( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateHealthCheckRequest.to_json( + compute.UpdateHealthCheckRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListHealthChecksRequest], + compute.HealthChecksAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteHealthCheckRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetHealthCheckRequest], + compute.HealthCheck]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertHealthCheckRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListHealthChecksRequest], + compute.HealthCheckList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchHealthCheckRequest], + compute.Operation]: + return self._patch + @ property + def update(self) -> Callable[ + [compute.UpdateHealthCheckRequest], + compute.Operation]: + return self._update + def close(self): + self._session.close() + + +__all__=( + 'HealthChecksRestTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/__init__.py new file mode 100644 index 000000000..5d8cd9547 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ImageFamilyViewsClient + +__all__ = ( + 'ImageFamilyViewsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/client.py new file mode 100644 index 000000000..4a491fb42 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/client.py @@ -0,0 +1,440 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.types import compute +from .transports.base import ImageFamilyViewsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ImageFamilyViewsRestTransport + + +class ImageFamilyViewsClientMeta(type): + """Metaclass for the ImageFamilyViews client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ImageFamilyViewsTransport]] + _transport_registry["rest"] = ImageFamilyViewsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ImageFamilyViewsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+        return next(iter(cls._transport_registry.values()))
+
+
+class ImageFamilyViewsClient(metaclass=ImageFamilyViewsClientMeta):
+    """The ImageFamilyViews API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ImageFamilyViewsClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ImageFamilyViewsClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> ImageFamilyViewsTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            ImageFamilyViewsTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, ImageFamilyViewsTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the image family views client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ImageFamilyViewsTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ImageFamilyViewsTransport): + # transport is a ImageFamilyViewsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def get(self, + request: Union[compute.GetImageFamilyViewRequest, dict] = None, + *, + project: str = None, + zone: str = None, + family: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.ImageFamilyView: + r"""Returns the latest image that is part of an image + family, is not deprecated and is rolled out in the + specified zone. + + Args: + request (Union[google.cloud.compute_v1.types.GetImageFamilyViewRequest, dict]): + The request object. A request message for + ImageFamilyViews.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + family (str): + Name of the image family to search + for. + + This corresponds to the ``family`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.ImageFamilyView: + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, family]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetImageFamilyViewRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetImageFamilyViewRequest): + request = compute.GetImageFamilyViewRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if family is not None: + request.family = family + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ImageFamilyViewsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/__init__.py new file mode 100644 index 000000000..e6a81f338 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ImageFamilyViewsTransport +from .rest import ImageFamilyViewsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[ImageFamilyViewsTransport]] +_transport_registry['rest'] = ImageFamilyViewsRestTransport + +__all__ = ( + 'ImageFamilyViewsTransport', + 'ImageFamilyViewsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/base.py new file mode 100644 index 000000000..312d97352 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/base.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ImageFamilyViewsTransport(abc.ABC): + """Abstract transport class for ImageFamilyViews.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+ This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetImageFamilyViewRequest], + Union[ + compute.ImageFamilyView, + Awaitable[compute.ImageFamilyView] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ImageFamilyViewsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/rest.py new file mode 100644 index 000000000..2c5faee20 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/image_family_views/transports/rest.py @@ -0,0 +1,224 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import ImageFamilyViewsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ImageFamilyViewsRestTransport(ImageFamilyViewsTransport): + """REST backend transport for ImageFamilyViews. + + The ImageFamilyViews API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _get(self, + request: compute.GetImageFamilyViewRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ImageFamilyView: + r"""Call the get method over HTTP. 
+ + Args: + request (~.compute.GetImageFamilyViewRequest): + The request object. A request message for + ImageFamilyViews.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ImageFamilyView: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/imageFamilyViews/{family}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "family", + "family" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetImageFamilyViewRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetImageFamilyViewRequest.to_json( + compute.GetImageFamilyViewRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ImageFamilyView.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def get(self) -> Callable[ + [compute.GetImageFamilyViewRequest], + compute.ImageFamilyView]: + return self._get + def close(self): + self._session.close() + + +__all__=( + 'ImageFamilyViewsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/images/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/__init__.py new file mode 100644 index 000000000..500ef3e80 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ImagesClient + +__all__ = ( + 'ImagesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/images/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/client.py new file mode 100644 index 000000000..98be8152b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/client.py @@ -0,0 +1,1376 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.images import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import ImagesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ImagesRestTransport + + +class ImagesClientMeta(type): + """Metaclass for the Images client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ImagesTransport]] + _transport_registry["rest"] = ImagesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ImagesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class ImagesClient(metaclass=ImagesClientMeta): + """The Images API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ImagesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + ImagesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ImagesTransport: + """Returns the transport used by the client instance. + + Returns: + ImagesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return 
"projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ImagesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the images client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ImagesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ImagesTransport): + # transport is a ImagesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def delete(self,
            request: Optional[Union[compute.DeleteImageRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            image: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Deletes the specified image.

        Args:
            request (Union[google.cloud.compute_v1.types.DeleteImageRequest, dict]):
                The request object. A request message for Images.Delete.
                See the method description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            image (str):
                Name of the image resource to delete.
                This corresponds to the ``image`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents a Google Compute Engine Operation resource.
                Operations can be global, regional or zonal; use an operation
                resource to manage asynchronous API requests. For more
                information, read Handling API responses and Global, Regional,
                and Zonal Resources.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, image])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.DeleteImageRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.DeleteImageRequest):
            request = compute.DeleteImageRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if image is not None:
                request.image = image

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def deprecate(self,
            request: Optional[Union[compute.DeprecateImageRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            image: Optional[str] = None,
            deprecation_status_resource: Optional[compute.DeprecationStatus] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Sets the deprecation status of an image. If an empty
        request body is given, clears the deprecation status
        instead.

        Args:
            request (Union[google.cloud.compute_v1.types.DeprecateImageRequest, dict]):
                The request object. A request message for
                Images.Deprecate. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            image (str):
                Image name.
                This corresponds to the ``image`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            deprecation_status_resource (google.cloud.compute_v1.types.DeprecationStatus):
                The body resource for this request.
                This corresponds to the ``deprecation_status_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents a Google Compute Engine Operation resource.
                Operations can be global, regional or zonal; use an operation
                resource to manage asynchronous API requests. For more
                information, read Handling API responses and Global, Regional,
                and Zonal Resources.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, image, deprecation_status_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.DeprecateImageRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.DeprecateImageRequest):
            request = compute.DeprecateImageRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if image is not None:
                request.image = image
            if deprecation_status_resource is not None:
                request.deprecation_status_resource = deprecation_status_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.deprecate]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get(self,
            request: Optional[Union[compute.GetImageRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            image: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Image:
        r"""Returns the specified image. Gets a list of available
        images by making a list() request.

        Args:
            request (Union[google.cloud.compute_v1.types.GetImageRequest, dict]):
                The request object. A request message for Images.Get.
                See the method description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            image (str):
                Name of the image resource to return.
                This corresponds to the ``image`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Image:
                Represents an Image resource. You can
                use images to create boot disks for your
                VM instances. For more information, read
                Images.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, image])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetImageRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetImageRequest):
            request = compute.GetImageRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if image is not None:
                request.image = image

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get_from_family(self,
            request: Optional[Union[compute.GetFromFamilyImageRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            family: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Image:
        r"""Returns the latest image that is part of an image
        family and is not deprecated.

        Args:
            request (Union[google.cloud.compute_v1.types.GetFromFamilyImageRequest, dict]):
                The request object. A request message for
                Images.GetFromFamily. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            family (str):
                Name of the image family to search
                for.

                This corresponds to the ``family`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Image:
                Represents an Image resource. You can
                use images to create boot disks for your
                VM instances. For more information, read
                Images.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, family])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetFromFamilyImageRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetFromFamilyImageRequest):
            request = compute.GetFromFamilyImageRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if family is not None:
                request.family = family

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_from_family]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get_iam_policy(self,
            request: Optional[Union[compute.GetIamPolicyImageRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            resource: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Policy:
        r"""Gets the access control policy for a resource. May be
        empty if no such policy or resource exists.

        Args:
            request (Union[google.cloud.compute_v1.types.GetIamPolicyImageRequest, dict]):
                The request object. A request message for
                Images.GetIamPolicy. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            resource (str):
                Name or id of the resource for this
                request.

                This corresponds to the ``resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Policy:
                An Identity and Access Management (IAM) policy, which
                specifies access controls for Google Cloud resources. A
                Policy is a collection of bindings; a binding binds one
                or more members (user accounts, service accounts, Google
                groups, domains) to a single role, and may also carry a
                condition restricting when the binding applies. For a
                description of IAM, its features and JSON/YAML policy
                examples, see the IAM documentation
                (https://cloud.google.com/iam/docs/).
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetIamPolicyImageRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetIamPolicyImageRequest):
            request = compute.GetIamPolicyImageRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if resource is not None:
                request.resource = resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_iam_policy]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def insert(self,
            request: Optional[Union[compute.InsertImageRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            image_resource: Optional[compute.Image] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Creates an image in the specified project using the
        data included in the request.

        Args:
            request (Union[google.cloud.compute_v1.types.InsertImageRequest, dict]):
                The request object. A request message for Images.Insert.
                See the method description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            image_resource (google.cloud.compute_v1.types.Image):
                The body resource for this request.
                This corresponds to the ``image_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents a Google Compute Engine Operation resource.
                Operations can be global, regional or zonal; use an operation
                resource to manage asynchronous API requests. For more
                information, read Handling API responses and Global, Regional,
                and Zonal Resources.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, image_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.InsertImageRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.InsertImageRequest):
            request = compute.InsertImageRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if image_resource is not None:
                request.image_resource = image_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.insert]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list(self,
            request: Optional[Union[compute.ListImagesRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListPager:
        r"""Retrieves the list of custom images available to the
        specified project. Custom images are images you create
        that belong to your project. This method does not get
        any images that belong to other projects, including
        publicly-available images, like Debian 8. If you want to
        get a list of publicly-available images, use this method
        to make a request to the respective image project, such
        as debian-cloud or windows-cloud.

        Args:
            request (Union[google.cloud.compute_v1.types.ListImagesRequest, dict]):
                The request object. A request message for Images.List.
                See the method description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.services.images.pagers.ListPager:
                Contains a list of images.
                Iterating over this object will yield
                results and resolve additional pages
                automatically.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.ListImagesRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.ListImagesRequest):
            request = compute.ListImagesRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
+ return response + + def patch(self, + request: Union[compute.PatchImageRequest, dict] = None, + *, + project: str = None, + image: str = None, + image_resource: compute.Image = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified image with the data included in + the request. Only the following fields can be modified: + family, description, deprecation status. + + Args: + request (Union[google.cloud.compute_v1.types.PatchImageRequest, dict]): + The request object. A request message for Images.Patch. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + image (str): + Name of the image resource to patch. + This corresponds to the ``image`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + image_resource (google.cloud.compute_v1.types.Image): + The body resource for this request + This corresponds to the ``image_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, image, image_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchImageRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchImageRequest): + request = compute.PatchImageRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if image is not None: + request.image = image + if image_resource is not None: + request.image_resource = image_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyImageRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_policy_request_resource: compute.GlobalSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyImageRequest, dict]): + The request object. A request message for + Images.SetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + This corresponds to the ``global_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). 
A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, resource, global_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyImageRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetIamPolicyImageRequest): + request = compute.SetIamPolicyImageRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_policy_request_resource is not None: + request.global_set_policy_request_resource = global_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsImageRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_labels_request_resource: compute.GlobalSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on an image. To learn more about + labels, read the Labeling Resources documentation. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsImageRequest, dict]): + The request object. A request message for + Images.SetLabels. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + This corresponds to the ``global_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, resource, global_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsImageRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetLabelsImageRequest): + request = compute.SetLabelsImageRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_labels_request_resource is not None: + request.global_set_labels_request_resource = global_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsImageRequest, dict] = None, + *, + project: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsImageRequest, dict]): + The request object. A request message for + Images.TestIamPermissions. See the method description + for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsImageRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsImageRequest): + request = compute.TestIamPermissionsImageRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ImagesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/images/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/pagers.py new file mode 100644 index 000000000..37b2aa326 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ImageList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ImageList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ImageList], + request: compute.ListImagesRequest, + response: compute.ImageList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListImagesRequest): + The initial request object. + response (google.cloud.compute_v1.types.ImageList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListImagesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ImageList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Image]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/__init__.py new file mode 100644 index 000000000..4ce8da021 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ImagesTransport +from .rest import ImagesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[ImagesTransport]] +_transport_registry['rest'] = ImagesRestTransport + +__all__ = ( + 'ImagesTransport', + 'ImagesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/base.py new file mode 100644 index 000000000..93ea05e3c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/base.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ImagesTransport(abc.ABC): + """Abstract transport class for Images.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.deprecate: gapic_v1.method.wrap_method( + self.deprecate, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_from_family: gapic_v1.method.wrap_method( + self.get_from_family, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteImageRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def deprecate(self) -> Callable[ + [compute.DeprecateImageRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetImageRequest], + Union[ + compute.Image, + Awaitable[compute.Image] + ]]: + raise NotImplementedError() + + @property + def get_from_family(self) -> Callable[ + [compute.GetFromFamilyImageRequest], + Union[ + compute.Image, + Awaitable[compute.Image] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyImageRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertImageRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListImagesRequest], + Union[ + compute.ImageList, + Awaitable[compute.ImageList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchImageRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyImageRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [compute.SetLabelsImageRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsImageRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + 
]]: + raise NotImplementedError() + + +__all__ = ( + 'ImagesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/rest.py new file mode 100644 index 000000000..e75bb1347 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/images/transports/rest.py @@ -0,0 +1,1364 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import ImagesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ImagesRestTransport(ImagesTransport): + """REST backend transport for Images. + + The Images API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteImageRequest): + The request object. A request message for Images.Delete. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/images/{image}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "image", + "image" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteImageRequest.to_json( + compute.DeleteImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _deprecate(self, + request: compute.DeprecateImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the deprecate method over HTTP. + + Args: + request (~.compute.DeprecateImageRequest): + The request object. A request message for + Images.Deprecate. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/images/{image}/deprecate', + 'body': 'deprecation_status_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "image", + "image" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeprecateImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.DeprecationStatus.to_json( + compute.DeprecationStatus( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeprecateImageRequest.to_json( + compute.DeprecateImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Image: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetImageRequest): + The request object. A request message for Images.Get. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Image: + Represents an Image resource. You can + use images to create boot disks for your + VM instances. For more information, read + Images. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/images/{image}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "image", + "image" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetImageRequest.to_json( + compute.GetImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Image.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_from_family(self, + request: compute.GetFromFamilyImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Image: + r"""Call the get from family method over HTTP. + + Args: + request (~.compute.GetFromFamilyImageRequest): + The request object. A request message for + Images.GetFromFamily. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Image: + Represents an Image resource. You can + use images to create boot disks for your + VM instances. For more information, read + Images. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/images/family/{family}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "family", + "family" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetFromFamilyImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetFromFamilyImageRequest.to_json( + compute.GetFromFamilyImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Image.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyImageRequest): + The request object. A request message for + Images.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/images/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyImageRequest.to_json( + compute.GetIamPolicyImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertImageRequest): + The request object. A request message for Images.Insert. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/images', + 'body': 'image_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Image.to_json( + compute.Image( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertImageRequest.to_json( + compute.InsertImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListImagesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ImageList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListImagesRequest): + The request object. A request message for Images.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ImageList: + Contains a list of images. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/images', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListImagesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListImagesRequest.to_json( + compute.ListImagesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ImageList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchImageRequest): + The request object. A request message for Images.Patch. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/images/{image}', + 'body': 'image_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "image", + "image" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.PatchImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Image.to_json( + compute.Image( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchImageRequest.to_json( + compute.PatchImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyImageRequest): + The request object. A request message for + Images.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/images/{resource}/setIamPolicy', + 'body': 'global_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetPolicyRequest.to_json( + compute.GlobalSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyImageRequest.to_json( + compute.SetIamPolicyImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsImageRequest): + The request object. A request message for + Images.SetLabels. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/images/{resource}/setLabels', + 'body': 'global_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetLabelsImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetLabelsRequest.to_json( + compute.GlobalSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsImageRequest.to_json( + compute.SetLabelsImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsImageRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsImageRequest): + The request object. A request message for + Images.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/images/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsImageRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsImageRequest.to_json( + compute.TestIamPermissionsImageRequest(transcoded_request['query_params']), + including_default_value_fields=False, + 
use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteImageRequest], + compute.Operation]: + return self._delete + @ property + def deprecate(self) -> Callable[ + [compute.DeprecateImageRequest], + compute.Operation]: + return self._deprecate + @ property + def get(self) -> Callable[ + [compute.GetImageRequest], + compute.Image]: + return self._get + @ property + def get_from_family(self) -> Callable[ + [compute.GetFromFamilyImageRequest], + compute.Image]: + return self._get_from_family + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyImageRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertImageRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListImagesRequest], + 
compute.ImageList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchImageRequest], + compute.Operation]: + return self._patch + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyImageRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsImageRequest], + compute.Operation]: + return self._set_labels + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsImageRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'ImagesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/__init__.py new file mode 100644 index 000000000..6a6ade026 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import InstanceGroupManagersClient + +__all__ = ( + 'InstanceGroupManagersClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/client.py new file mode 100644 index 000000000..11aaf8b73 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/client.py @@ -0,0 +1,2524 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.instance_group_managers import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import InstanceGroupManagersTransport, DEFAULT_CLIENT_INFO +from .transports.rest import InstanceGroupManagersRestTransport + + +class InstanceGroupManagersClientMeta(type): + """Metaclass for the InstanceGroupManagers client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[InstanceGroupManagersTransport]] + _transport_registry["rest"] = InstanceGroupManagersRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[InstanceGroupManagersTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class InstanceGroupManagersClient(metaclass=InstanceGroupManagersClientMeta):
+    """The InstanceGroupManagers API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            InstanceGroupManagersClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceGroupManagersClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> InstanceGroupManagersTransport: + """Returns the transport used by the client instance. + + Returns: + InstanceGroupManagersTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def
parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, InstanceGroupManagersTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the instance group managers client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, InstanceGroupManagersTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, InstanceGroupManagersTransport): + # transport is a InstanceGroupManagersTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def abandon_instances(self, + request: Union[compute.AbandonInstancesInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_abandon_instances_request_resource: compute.InstanceGroupManagersAbandonInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Flags the specified instances to be removed from the + managed instance group. Abandoning an instance does not + delete the instance, but it does remove the instance + from any target pools that are applied by the managed + instance group. This method reduces the targetSize of + the managed instance group by the number of instances + that you abandon. 
This operation is marked as DONE when + the action is scheduled even if the instances have not + yet been removed from the group. You must separately + verify the status of the abandoning action with the + listmanagedinstances method. If the group is part of a + backend service that has enabled connection draining, it + can take up to 60 seconds after the connection draining + duration has elapsed before the VM instance is removed + or deleted. You can specify a maximum of 1000 instances + with this method per request. + + Args: + request (Union[google.cloud.compute_v1.types.AbandonInstancesInstanceGroupManagerRequest, dict]): + The request object. Messages + A request message for + InstanceGroupManagers.AbandonInstances. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_abandon_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersAbandonInstancesRequest): + The body resource for this request + This corresponds to the ``instance_group_managers_abandon_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_abandon_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AbandonInstancesInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AbandonInstancesInstanceGroupManagerRequest): + request = compute.AbandonInstancesInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_abandon_instances_request_resource is not None: + request.instance_group_managers_abandon_instances_request_resource = instance_group_managers_abandon_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.abandon_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def aggregated_list(self, + request: Union[compute.AggregatedListInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of managed instance groups and + groups them by zone. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListInstanceGroupManagersRequest, dict]): + The request object. A request message for + InstanceGroupManagers.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instance_group_managers.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListInstanceGroupManagersRequest): + request = compute.AggregatedListInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def apply_updates_to_instances(self, + request: Union[compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_apply_updates_request_resource: compute.InstanceGroupManagersApplyUpdatesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Applies changes to selected instances on the managed + instance group. This method can be used to apply new + overrides and/or new versions. + + Args: + request (Union[google.cloud.compute_v1.types.ApplyUpdatesToInstancesInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.ApplyUpdatesToInstances. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + Should conform to RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group, should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_apply_updates_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersApplyUpdatesRequest): + The body resource for this request + This corresponds to the ``instance_group_managers_apply_updates_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_apply_updates_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest): + request = compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_apply_updates_request_resource is not None: + request.instance_group_managers_apply_updates_request_resource = instance_group_managers_apply_updates_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.apply_updates_to_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_instances(self, + request: Union[compute.CreateInstancesInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_create_instances_request_resource: compute.InstanceGroupManagersCreateInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates instances with per-instance configs in this + managed instance group. Instances are created using the + current instance template. The create instances + operation is marked DONE if the createInstances request + is successful. The underlying actions take additional + time. You must separately verify the status of the + creating or actions with the listmanagedinstances + method. + + Args: + request (Union[google.cloud.compute_v1.types.CreateInstancesInstanceGroupManagerRequest, dict]): + The request object. 
A request message for + InstanceGroupManagers.CreateInstances. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. It + should conform to RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_create_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersCreateInstancesRequest): + The body resource for this request + This corresponds to the ``instance_group_managers_create_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_create_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.CreateInstancesInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.CreateInstancesInstanceGroupManagerRequest): + request = compute.CreateInstancesInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_create_instances_request_resource is not None: + request.instance_group_managers_create_instances_request_resource = instance_group_managers_create_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified managed instance group and all + of the instances in that group. Note that the instance + group must not belong to a backend service. Read + Deleting an instance group for more information. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group to delete. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteInstanceGroupManagerRequest): + request = compute.DeleteInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_instances(self, + request: Union[compute.DeleteInstancesInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_delete_instances_request_resource: compute.InstanceGroupManagersDeleteInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Flags the specified instances in the managed instance + group for immediate deletion. The instances are also + removed from any target pools of which they were a + member. This method reduces the targetSize of the + managed instance group by the number of instances that + you delete. This operation is marked as DONE when the + action is scheduled even if the instances are still + being deleted. You must separately verify the status of + the deleting action with the listmanagedinstances + method. If the group is part of a backend service that + has enabled connection draining, it can take up to 60 + seconds after the connection draining duration has + elapsed before the VM instance is removed or deleted. + You can specify a maximum of 1000 instances with this + method per request. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInstancesInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.DeleteInstances. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_delete_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersDeleteInstancesRequest): + The body resource for this request + This corresponds to the ``instance_group_managers_delete_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_delete_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInstancesInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteInstancesInstanceGroupManagerRequest): + request = compute.DeleteInstancesInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_delete_instances_request_resource is not None: + request.instance_group_managers_delete_instances_request_resource = instance_group_managers_delete_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete_per_instance_configs(self, + request: Union[compute.DeletePerInstanceConfigsInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_delete_per_instance_configs_req_resource: compute.InstanceGroupManagersDeletePerInstanceConfigsReq = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes selected per-instance configs for the managed + instance group. + + Args: + request (Union[google.cloud.compute_v1.types.DeletePerInstanceConfigsInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.DeletePerInstanceConfigs. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. It + should conform to RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_delete_per_instance_configs_req_resource (google.cloud.compute_v1.types.InstanceGroupManagersDeletePerInstanceConfigsReq): + The body resource for this request + This corresponds to the ``instance_group_managers_delete_per_instance_configs_req_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_delete_per_instance_configs_req_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeletePerInstanceConfigsInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeletePerInstanceConfigsInstanceGroupManagerRequest): + request = compute.DeletePerInstanceConfigsInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_delete_per_instance_configs_req_resource is not None: + request.instance_group_managers_delete_per_instance_configs_req_resource = instance_group_managers_delete_per_instance_configs_req_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InstanceGroupManager: + r"""Returns all of the details about the specified + managed instance group. Gets a list of available managed + instance groups by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. 
+ + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.InstanceGroupManager: + Represents a Managed Instance Group + resource. An instance group is a + collection of VM instances that you can + manage as a single entity. For more + information, read Instance groups. For + zonal Managed Instance Group, use the + instanceGroupManagers resource. For + regional Managed Instance Group, use the + regionInstanceGroupManagers resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetInstanceGroupManagerRequest): + request = compute.GetInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager_resource: compute.InstanceGroupManager = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a managed instance group using the + information that you specify in the request. After the + group is created, instances in the group are created + using the specified instance template. This operation is + marked as DONE when the group is created even if the + instances in the group have not yet been created. You + must separately verify the status of the individual + instances with the listmanagedinstances method. A + managed instance group can have up to 1000 VM instances + per group. Please contact Cloud Support if you need an + increase in this limit. + + Args: + request (Union[google.cloud.compute_v1.types.InsertInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where you want + to create the managed instance group. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + This corresponds to the ``instance_group_manager_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertInstanceGroupManagerRequest): + request = compute.InsertInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager_resource is not None: + request.instance_group_manager_resource = instance_group_manager_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of managed instance groups that are + contained within the specified project and zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListInstanceGroupManagersRequest, dict]): + The request object. A request message for + InstanceGroupManagers.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instance_group_managers.pagers.ListPager: + [Output Only] A list of managed instance groups. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInstanceGroupManagersRequest): + request = compute.ListInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_errors(self, + request: Union[compute.ListErrorsInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListErrorsPager: + r"""Lists all errors thrown by actions on instances for a + given managed instance group. The filter and orderBy + query parameters are not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListErrorsInstanceGroupManagersRequest, dict]): + The request object. A request message for + InstanceGroupManagers.ListErrors. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. It + should conform to RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance group. It must be a + string that meets the requirements in RFC1035, or an + unsigned long integer: must match regexp pattern: + (?:`a-z `__?)|1-9{0,19}. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instance_group_managers.pagers.ListErrorsPager: + Iterating over this object will yield + results and resolve additional pages + automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListErrorsInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListErrorsInstanceGroupManagersRequest): + request = compute.ListErrorsInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_errors] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListErrorsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_managed_instances(self, + request: Union[compute.ListManagedInstancesInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListManagedInstancesPager: + r"""Lists all of the instances in the managed instance + group. Each instance in the list has a currentAction, + which indicates the action that the managed instance + group is performing on the instance. For example, if the + group is still creating an instance, the currentAction + is CREATING. If a previous action failed, the list + displays the errors for that failed action. The orderBy + query parameter is not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListManagedInstancesInstanceGroupManagersRequest, dict]): + The request object. A request message for + InstanceGroupManagers.ListManagedInstances. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.instance_group_managers.pagers.ListManagedInstancesPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListManagedInstancesInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListManagedInstancesInstanceGroupManagersRequest): + request = compute.ListManagedInstancesInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_managed_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListManagedInstancesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_per_instance_configs(self, + request: Union[compute.ListPerInstanceConfigsInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPerInstanceConfigsPager: + r"""Lists all of the per-instance configs defined for the + managed instance group. The orderBy query parameter is + not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListPerInstanceConfigsInstanceGroupManagersRequest, dict]): + The request object. A request message for + InstanceGroupManagers.ListPerInstanceConfigs. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. It + should conform to RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instance_group_managers.pagers.ListPerInstanceConfigsPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListPerInstanceConfigsInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListPerInstanceConfigsInstanceGroupManagersRequest): + request = compute.ListPerInstanceConfigsInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPerInstanceConfigsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def patch(self, + request: Union[compute.PatchInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_manager_resource: compute.InstanceGroupManager = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates a managed instance group using the + information that you specify in the request. This + operation is marked as DONE when the group is patched + even if the instances in the group are still in the + process of being patched. You must separately verify the + status of the individual instances with the + listManagedInstances method. This method supports PATCH + semantics and uses the JSON merge patch format and + processing rules. If you update your group to specify a + new template or instance configuration, it's possible + that your intended specification for each VM in the + group is different from the current state of that VM. To + learn how to apply an updated configuration to the VMs + in a MIG, see Updating instances in a MIG. + + Args: + request (Union[google.cloud.compute_v1.types.PatchInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.Patch. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where you want + to create the managed instance group. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the instance group + manager. 
+ + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + This corresponds to the ``instance_group_manager_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance_group_manager, instance_group_manager_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchInstanceGroupManagerRequest): + request = compute.PatchInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_manager_resource is not None: + request.instance_group_manager_resource = instance_group_manager_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch_per_instance_configs(self, + request: Union[compute.PatchPerInstanceConfigsInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_patch_per_instance_configs_req_resource: compute.InstanceGroupManagersPatchPerInstanceConfigsReq = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Inserts or patches per-instance configs for the + managed instance group. 
perInstanceConfig.name serves as + a key used to distinguish whether to perform insert or + patch. + + Args: + request (Union[google.cloud.compute_v1.types.PatchPerInstanceConfigsInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.PatchPerInstanceConfigs. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. It + should conform to RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_patch_per_instance_configs_req_resource (google.cloud.compute_v1.types.InstanceGroupManagersPatchPerInstanceConfigsReq): + The body resource for this request + This corresponds to the ``instance_group_managers_patch_per_instance_configs_req_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_patch_per_instance_configs_req_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchPerInstanceConfigsInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchPerInstanceConfigsInstanceGroupManagerRequest): + request = compute.PatchPerInstanceConfigsInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_patch_per_instance_configs_req_resource is not None: + request.instance_group_managers_patch_per_instance_configs_req_resource = instance_group_managers_patch_per_instance_configs_req_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def recreate_instances(self, + request: Union[compute.RecreateInstancesInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_recreate_instances_request_resource: compute.InstanceGroupManagersRecreateInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Flags the specified VM instances in the managed + instance group to be immediately recreated. Each + instance is recreated using the group's current + configuration. This operation is marked as DONE when the + flag is set even if the instances have not yet been + recreated. You must separately verify the status of each + instance by checking its currentAction field; for more + information, see Checking the status of managed + instances. If the group is part of a backend service + that has enabled connection draining, it can take up to + 60 seconds after the connection draining duration has + elapsed before the VM instance is removed or deleted. + You can specify a maximum of 1000 instances with this + method per request. 
+ + Args: + request (Union[google.cloud.compute_v1.types.RecreateInstancesInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.RecreateInstances. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_recreate_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersRecreateInstancesRequest): + The body resource for this request + This corresponds to the ``instance_group_managers_recreate_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_recreate_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RecreateInstancesInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RecreateInstancesInstanceGroupManagerRequest): + request = compute.RecreateInstancesInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_recreate_instances_request_resource is not None: + request.instance_group_managers_recreate_instances_request_resource = instance_group_managers_recreate_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.recreate_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def resize(self, + request: Union[compute.ResizeInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + size: int = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Resizes the managed instance group. If you increase + the size, the group creates new instances using the + current instance template. If you decrease the size, the + group deletes instances. The resize operation is marked + DONE when the resize actions are scheduled even if the + group has not yet added or deleted any instances. You + must separately verify the status of the creating or + deleting actions with the listmanagedinstances method. + When resizing down, the instance group arbitrarily + chooses the order in which VMs are deleted. The group + takes into account some VM attributes when making the + selection including: + The status of the VM instance. + + The health of the VM instance. + The instance template + version the VM is based on. + For regional managed + instance groups, the location of the VM instance. This + list is subject to change. If the group is part of a + backend service that has enabled connection draining, it + can take up to 60 seconds after the connection draining + duration has elapsed before the VM instance is removed + or deleted. + + Args: + request (Union[google.cloud.compute_v1.types.ResizeInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.Resize. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + size (int): + The number of running instances that + the managed instance group should + maintain at any given time. The group + automatically adds or removes instances + to maintain the number of instances + specified by this parameter. + + This corresponds to the ``size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance_group_manager, size]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ResizeInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ResizeInstanceGroupManagerRequest): + request = compute.ResizeInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if size is not None: + request.size = size + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.resize] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_instance_template(self, + request: Union[compute.SetInstanceTemplateInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_set_instance_template_request_resource: compute.InstanceGroupManagersSetInstanceTemplateRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Specifies the instance template to use when creating + new instances in this group. 
The templates for existing + instances in the group do not change unless you run + recreateInstances, run applyUpdatesToInstances, or set + the group's updatePolicy.type to PROACTIVE. + + Args: + request (Union[google.cloud.compute_v1.types.SetInstanceTemplateInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.SetInstanceTemplate. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_set_instance_template_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersSetInstanceTemplateRequest): + The body resource for this request + This corresponds to the ``instance_group_managers_set_instance_template_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_set_instance_template_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetInstanceTemplateInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetInstanceTemplateInstanceGroupManagerRequest): + request = compute.SetInstanceTemplateInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_set_instance_template_request_resource is not None: + request.instance_group_managers_set_instance_template_request_resource = instance_group_managers_set_instance_template_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_instance_template] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_target_pools(self, + request: Union[compute.SetTargetPoolsInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_set_target_pools_request_resource: compute.InstanceGroupManagersSetTargetPoolsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Modifies the target pools to which all instances in + this managed instance group are assigned. The target + pools automatically apply to all of the instances in the + managed instance group. This operation is marked DONE + when you make the request even if the instances have not + yet been added to their target pools. The change might + take some time to apply to all of the instances in the + group depending on the size of the group. + + Args: + request (Union[google.cloud.compute_v1.types.SetTargetPoolsInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.SetTargetPools. See the method + description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_set_target_pools_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersSetTargetPoolsRequest): + The body resource for this request + This corresponds to the ``instance_group_managers_set_target_pools_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+        # Quick check: if a request object was given, the flattened keyword
+        # arguments that map onto request fields must not also be set.
+        has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_set_target_pools_request_resource])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a compute.SetTargetPoolsInstanceGroupManagerRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, compute.SetTargetPoolsInstanceGroupManagerRequest):
+            request = compute.SetTargetPoolsInstanceGroupManagerRequest(request)
+        # If we have keyword arguments corresponding to fields on the
+        # request, apply these.
+        if project is not None:
+            request.project = project
+        if zone is not None:
+            request.zone = zone
+        if instance_group_manager is not None:
+            request.instance_group_manager = instance_group_manager
+        if instance_group_managers_set_target_pools_request_resource is not None:
+            request.instance_group_managers_set_target_pools_request_resource = instance_group_managers_set_target_pools_request_resource
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.set_target_pools]
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+ return response + + def update_per_instance_configs(self, + request: Union[compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_manager: str = None, + instance_group_managers_update_per_instance_configs_req_resource: compute.InstanceGroupManagersUpdatePerInstanceConfigsReq = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Inserts or updates per-instance configs for the + managed instance group. perInstanceConfig.name serves as + a key used to distinguish whether to perform insert or + patch. + + Args: + request (Union[google.cloud.compute_v1.types.UpdatePerInstanceConfigsInstanceGroupManagerRequest, dict]): + The request object. A request message for + InstanceGroupManagers.UpdatePerInstanceConfigs. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + managed instance group is located. It + should conform to RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_managers_update_per_instance_configs_req_resource (google.cloud.compute_v1.types.InstanceGroupManagersUpdatePerInstanceConfigsReq): + The body resource for this request + This corresponds to the ``instance_group_managers_update_per_instance_configs_req_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.compute_v1.types.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                [Global](/compute/docs/reference/rest/v1/globalOperations)
+                \*
+                [Regional](/compute/docs/reference/rest/v1/regionOperations)
+                \*
+                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the globalOperations
+                resource. - For regional operations, use the
+                regionOperations resource. - For zonal operations, use
+                the zonalOperations resource. For more information, read
+                Global, Regional, and Zonal Resources.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: if a request object was given, the flattened keyword
+        # arguments that map onto request fields must not also be set.
+        has_flattened_params = any([project, zone, instance_group_manager, instance_group_managers_update_per_instance_configs_req_resource])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+ if not isinstance(request, compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest): + request = compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_managers_update_per_instance_configs_req_resource is not None: + request.instance_group_managers_update_per_instance_configs_req_resource = instance_group_managers_update_per_instance_configs_req_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "InstanceGroupManagersClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/pagers.py new file mode 100644 index 000000000..86068041c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/pagers.py @@ -0,0 +1,316 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupManagerAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupManagerAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceGroupManagerAggregatedList], + request: compute.AggregatedListInstanceGroupManagersRequest, + response: compute.InstanceGroupManagerAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceGroupManagerAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupManagerAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.InstanceGroupManagersScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.InstanceGroupManagersScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupManagerList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupManagerList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceGroupManagerList], + request: compute.ListInstanceGroupManagersRequest, + response: compute.InstanceGroupManagerList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceGroupManagerList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupManagerList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceGroupManager]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListErrorsPager: + """A pager for iterating through ``list_errors`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupManagersListErrorsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListErrors`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupManagersListErrorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceGroupManagersListErrorsResponse], + request: compute.ListErrorsInstanceGroupManagersRequest, + response: compute.InstanceGroupManagersListErrorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.compute_v1.types.ListErrorsInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceGroupManagersListErrorsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListErrorsInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupManagersListErrorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceManagedByIgmError]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListManagedInstancesPager: + """A pager for iterating through ``list_managed_instances`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupManagersListManagedInstancesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``managed_instances`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListManagedInstances`` requests and continue to iterate + through the ``managed_instances`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupManagersListManagedInstancesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. 
+ """ + def __init__(self, + method: Callable[..., compute.InstanceGroupManagersListManagedInstancesResponse], + request: compute.ListManagedInstancesInstanceGroupManagersRequest, + response: compute.InstanceGroupManagersListManagedInstancesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListManagedInstancesInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceGroupManagersListManagedInstancesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListManagedInstancesInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupManagersListManagedInstancesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.ManagedInstance]: + for page in self.pages: + yield from page.managed_instances + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPerInstanceConfigsPager: + """A pager for iterating through ``list_per_instance_configs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupManagersListPerInstanceConfigsResp` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListPerInstanceConfigs`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupManagersListPerInstanceConfigsResp` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceGroupManagersListPerInstanceConfigsResp], + request: compute.ListPerInstanceConfigsInstanceGroupManagersRequest, + response: compute.InstanceGroupManagersListPerInstanceConfigsResp, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListPerInstanceConfigsInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceGroupManagersListPerInstanceConfigsResp): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListPerInstanceConfigsInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupManagersListPerInstanceConfigsResp]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.PerInstanceConfig]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/__init__.py new file mode 100644 index 000000000..7aa293e29 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import InstanceGroupManagersTransport
+from .rest import InstanceGroupManagersRestTransport
+
+
+# Registry mapping transport names (only 'rest' here) to transport classes.
+_transport_registry = OrderedDict()  # type: Dict[str, Type[InstanceGroupManagersTransport]]
+_transport_registry['rest'] = InstanceGroupManagersRestTransport
+
+__all__ = (
+    'InstanceGroupManagersTransport',
+    'InstanceGroupManagersRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/base.py
new file mode 100644
index 000000000..d9dd665e2
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/base.py
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class InstanceGroupManagersTransport(abc.ABC): + """Abstract transport class for InstanceGroupManagers.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.abandon_instances: gapic_v1.method.wrap_method( + self.abandon_instances, + default_timeout=None, + client_info=client_info, + ), + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.apply_updates_to_instances: gapic_v1.method.wrap_method( + self.apply_updates_to_instances, + default_timeout=None, + client_info=client_info, + ), + self.create_instances: gapic_v1.method.wrap_method( + self.create_instances, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.delete_instances: gapic_v1.method.wrap_method( + self.delete_instances, + default_timeout=None, + client_info=client_info, + ), + self.delete_per_instance_configs: gapic_v1.method.wrap_method( + self.delete_per_instance_configs, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_errors: gapic_v1.method.wrap_method( + self.list_errors, + default_timeout=None, + client_info=client_info, + ), + self.list_managed_instances: gapic_v1.method.wrap_method( + self.list_managed_instances, + default_timeout=None, + client_info=client_info, + ), + self.list_per_instance_configs: gapic_v1.method.wrap_method( + self.list_per_instance_configs, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.patch_per_instance_configs: gapic_v1.method.wrap_method( + self.patch_per_instance_configs, + default_timeout=None, + 
client_info=client_info, + ), + self.recreate_instances: gapic_v1.method.wrap_method( + self.recreate_instances, + default_timeout=None, + client_info=client_info, + ), + self.resize: gapic_v1.method.wrap_method( + self.resize, + default_timeout=None, + client_info=client_info, + ), + self.set_instance_template: gapic_v1.method.wrap_method( + self.set_instance_template, + default_timeout=None, + client_info=client_info, + ), + self.set_target_pools: gapic_v1.method.wrap_method( + self.set_target_pools, + default_timeout=None, + client_info=client_info, + ), + self.update_per_instance_configs: gapic_v1.method.wrap_method( + self.update_per_instance_configs, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def abandon_instances(self) -> Callable[ + [compute.AbandonInstancesInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListInstanceGroupManagersRequest], + Union[ + compute.InstanceGroupManagerAggregatedList, + Awaitable[compute.InstanceGroupManagerAggregatedList] + ]]: + raise NotImplementedError() + + @property + def apply_updates_to_instances(self) -> Callable[ + [compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def create_instances(self) -> Callable[ + [compute.CreateInstancesInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteInstanceGroupManagerRequest], + Union[ + compute.Operation, + 
Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_instances(self) -> Callable[ + [compute.DeleteInstancesInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_per_instance_configs(self) -> Callable[ + [compute.DeletePerInstanceConfigsInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetInstanceGroupManagerRequest], + Union[ + compute.InstanceGroupManager, + Awaitable[compute.InstanceGroupManager] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListInstanceGroupManagersRequest], + Union[ + compute.InstanceGroupManagerList, + Awaitable[compute.InstanceGroupManagerList] + ]]: + raise NotImplementedError() + + @property + def list_errors(self) -> Callable[ + [compute.ListErrorsInstanceGroupManagersRequest], + Union[ + compute.InstanceGroupManagersListErrorsResponse, + Awaitable[compute.InstanceGroupManagersListErrorsResponse] + ]]: + raise NotImplementedError() + + @property + def list_managed_instances(self) -> Callable[ + [compute.ListManagedInstancesInstanceGroupManagersRequest], + Union[ + compute.InstanceGroupManagersListManagedInstancesResponse, + Awaitable[compute.InstanceGroupManagersListManagedInstancesResponse] + ]]: + raise NotImplementedError() + + @property + def list_per_instance_configs(self) -> Callable[ + [compute.ListPerInstanceConfigsInstanceGroupManagersRequest], + Union[ + compute.InstanceGroupManagersListPerInstanceConfigsResp, + Awaitable[compute.InstanceGroupManagersListPerInstanceConfigsResp] + ]]: + raise NotImplementedError() + + @property + def 
patch(self) -> Callable[ + [compute.PatchInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def patch_per_instance_configs(self) -> Callable[ + [compute.PatchPerInstanceConfigsInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def recreate_instances(self) -> Callable[ + [compute.RecreateInstancesInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def resize(self) -> Callable[ + [compute.ResizeInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_instance_template(self) -> Callable[ + [compute.SetInstanceTemplateInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_target_pools(self) -> Callable[ + [compute.SetTargetPoolsInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update_per_instance_configs(self) -> Callable[ + [compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'InstanceGroupManagersTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/rest.py new file mode 100644 index 000000000..3c16195b8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_group_managers/transports/rest.py @@ -0,0 +1,2406 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: 
ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

# Older google-api-core releases do not expose _MethodDefault; fall back to a
# plain ``object`` sentinel in the retry-type alias in that case.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#



from google.cloud.compute_v1.types import compute

from .base import InstanceGroupManagersTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


# Client info advertised by this REST transport: reuse the generated gapic
# version and report the installed ``requests`` version instead of gRPC.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)

class InstanceGroupManagersRestTransport(InstanceGroupManagersTransport):
    """REST backend transport for InstanceGroupManagers.

    The InstanceGroupManagers API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials]=None,
            credentials_file: Optional[str]=None,
            scopes: Optional[Sequence[str]]=None,
            client_cert_source_for_mtls: Optional[Callable[[
                ], Tuple[bytes, bytes]]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests; if none are
                given, the client attempts to ascertain credentials from the
                environment.
            credentials_file (Optional[str]): A file with credentials loadable
                with :func:`google.auth.load_credentials_from_file`.  Ignored
                if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes.  Ignored if
                ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Client certificate to configure a mutual-TLS HTTP channel.
                Ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string with API requests.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs
                should be used for service-account credentials.
            url_scheme (str): Protocol scheme for the API endpoint; normally
                "https", but "http" can be specified for testing/local servers.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # AuthorizedSession refreshes/attaches credentials on every request.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _abandon_instances(self,
            request: compute.AbandonInstancesInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the abandon instances method over HTTP.

        Args:
            request (~.compute.AbandonInstancesInstanceGroupManagerRequest):
                The request object.  A request message for
                InstanceGroupManagers.AbandonInstances.  See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or zonal),
                used to manage this asynchronous API request.  For more
                information, read Handling API responses and Global, Regional,
                and Zonal Resources.
        """

        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/abandonInstances',
                'body': 'instance_group_managers_abandon_instances_request_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.AbandonInstancesInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.InstanceGroupManagersAbandonInstancesRequest.to_json(
            compute.InstanceGroupManagersAbandonInstancesRequest(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.AbandonInstancesInstanceGroupManagerRequest.to_json(
            compute.AbandonInstancesInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _aggregated_list(self,
            request: compute.AggregatedListInstanceGroupManagersRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.InstanceGroupManagerAggregatedList:
        r"""Call the aggregated list method over HTTP.

        Args:
            request (~.compute.AggregatedListInstanceGroupManagersRequest):
                The request object. A request message for
                InstanceGroupManagers.AggregatedList.
                See the method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.InstanceGroupManagerAggregatedList:

        """

        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/aggregated/instanceGroupManagers',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "project",
                "project"
            ),
        ]

        request_kwargs = compute.AggregatedListInstanceGroupManagersRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.AggregatedListInstanceGroupManagersRequest.to_json(
            compute.AggregatedListInstanceGroupManagersRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request (GET: no request body).
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.InstanceGroupManagerAggregatedList.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _apply_updates_to_instances(self,
            request: compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the apply updates to instances method over HTTP.

        Args:
            request (~.compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.ApplyUpdatesToInstances.
                See the method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or zonal),
                used to manage this asynchronous API request.  For more
                information, read Handling API responses and Global, Regional,
                and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/applyUpdatesToInstances', + 'body': 'instance_group_managers_apply_updates_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupManagersApplyUpdatesRequest.to_json( + compute.InstanceGroupManagersApplyUpdatesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest.to_json( + compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _create_instances(self, + request: compute.CreateInstancesInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the create instances method over HTTP. + + Args: + request (~.compute.CreateInstancesInstanceGroupManagerRequest): + The request object. A request message for + InstanceGroupManagers.CreateInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/createInstances',
                'body': 'instance_group_managers_create_instances_request_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.CreateInstancesInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.InstanceGroupManagersCreateInstancesRequest.to_json(
            compute.InstanceGroupManagersCreateInstancesRequest(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.CreateInstancesInstanceGroupManagerRequest.to_json(
            compute.CreateInstancesInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _delete(self,
            request: compute.DeleteInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.Delete. See the
                method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global `__
                \*
                `Regional `__
                \*
                `Zonal `__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses.
Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        http_options = [
            {
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.DeleteInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.DeleteInstanceGroupManagerRequest.to_json(
            compute.DeleteInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request (HTTP DELETE: no request body).
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _delete_instances(self,
            request: compute.DeleteInstancesInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete instances method over HTTP.

        Args:
            request (~.compute.DeleteInstancesInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.DeleteInstances.
                See the method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global `__
                \*
                `Regional `__
                \*
                `Zonal `__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses.
Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/deleteInstances',
                'body': 'instance_group_managers_delete_instances_request_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.DeleteInstancesInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.InstanceGroupManagersDeleteInstancesRequest.to_json(
            compute.InstanceGroupManagersDeleteInstancesRequest(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.DeleteInstancesInstanceGroupManagerRequest.to_json(
            compute.DeleteInstancesInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _delete_per_instance_configs(self,
            request: compute.DeletePerInstanceConfigsInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete per instance configs method over HTTP.

        Args:
            request (~.compute.DeletePerInstanceConfigsInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.DeletePerInstanceConfigs.
                See the method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource.
Google Compute Engine
                has three Operation resources: \*
                `Global `__
                \*
                `Regional `__
                \*
                `Zonal `__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/deletePerInstanceConfigs',
                'body': 'instance_group_managers_delete_per_instance_configs_req_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.DeletePerInstanceConfigsInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.InstanceGroupManagersDeletePerInstanceConfigsReq.to_json(
            compute.InstanceGroupManagersDeletePerInstanceConfigsReq(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.DeletePerInstanceConfigsInstanceGroupManagerRequest.to_json(
            compute.DeletePerInstanceConfigsInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _get(self,
            request: compute.GetInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.InstanceGroupManager:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.Get. See the
                method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.InstanceGroupManager:
                Represents a Managed Instance Group
                resource. An instance group is a
                collection of VM instances that you can
                manage as a single entity. For more
                information, read Instance groups. For
                zonal Managed Instance Group, use the
                instanceGroupManagers resource.
For
                regional Managed Instance Group, use the
                regionInstanceGroupManagers resource.

        """

        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.GetInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.GetInstanceGroupManagerRequest.to_json(
            compute.GetInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request (GET: no request body).
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.InstanceGroupManager.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _insert(self,
            request: compute.InsertInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertInstanceGroupManagerRequest):
                The request object.  A request message for
                InstanceGroupManagers.Insert.  See the method description
                for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or zonal),
                used to manage this asynchronous API request.  For more
                information, read Handling API responses and Global, Regional,
                and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers', + 'body': 'instance_group_manager_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupManager.to_json( + compute.InstanceGroupManager( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertInstanceGroupManagerRequest.to_json( + compute.InsertInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupManagerList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListInstanceGroupManagersRequest): + The request object. A request message for + InstanceGroupManagers.List. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InstanceGroupManagerList: + [Output Only] A list of managed instance groups. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListInstanceGroupManagersRequest.to_json( + compute.ListInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceGroupManagerList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_errors(self, + request: compute.ListErrorsInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupManagersListErrorsResponse: + r"""Call the list errors method over HTTP. + + Args: + request (~.compute.ListErrorsInstanceGroupManagersRequest): + The request object. A request message for + InstanceGroupManagers.ListErrors. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.InstanceGroupManagersListErrorsResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/listErrors', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListErrorsInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListErrorsInstanceGroupManagersRequest.to_json( + compute.ListErrorsInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceGroupManagersListErrorsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_managed_instances(self, + request: compute.ListManagedInstancesInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupManagersListManagedInstancesResponse: + r"""Call the list managed instances method over HTTP. + + Args: + request (~.compute.ListManagedInstancesInstanceGroupManagersRequest): + The request object. A request message for + InstanceGroupManagers.ListManagedInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.InstanceGroupManagersListManagedInstancesResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/listManagedInstances', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListManagedInstancesInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListManagedInstancesInstanceGroupManagersRequest.to_json( + compute.ListManagedInstancesInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceGroupManagersListManagedInstancesResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_per_instance_configs(self, + request: compute.ListPerInstanceConfigsInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupManagersListPerInstanceConfigsResp: + r"""Call the list per instance configs method over HTTP. + + Args: + request (~.compute.ListPerInstanceConfigsInstanceGroupManagersRequest): + The request object. A request message for + InstanceGroupManagers.ListPerInstanceConfigs. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.InstanceGroupManagersListPerInstanceConfigsResp: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/listPerInstanceConfigs', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListPerInstanceConfigsInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListPerInstanceConfigsInstanceGroupManagersRequest.to_json( + compute.ListPerInstanceConfigsInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceGroupManagersListPerInstanceConfigsResp.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchInstanceGroupManagerRequest): + The request object. A request message for + InstanceGroupManagers.Patch. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}', + 'body': 'instance_group_manager_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.PatchInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupManager.to_json( + compute.InstanceGroupManager( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchInstanceGroupManagerRequest.to_json( + compute.PatchInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch_per_instance_configs(self, + request: compute.PatchPerInstanceConfigsInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch per instance + configs method over HTTP. + + Args: + request (~.compute.PatchPerInstanceConfigsInstanceGroupManagerRequest): + The request object. A request message for + InstanceGroupManagers.PatchPerInstanceConfigs. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/patchPerInstanceConfigs', + 'body': 'instance_group_managers_patch_per_instance_configs_req_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.PatchPerInstanceConfigsInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupManagersPatchPerInstanceConfigsReq.to_json( + compute.InstanceGroupManagersPatchPerInstanceConfigsReq( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchPerInstanceConfigsInstanceGroupManagerRequest.to_json( + compute.PatchPerInstanceConfigsInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _recreate_instances(self, + request: compute.RecreateInstancesInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the recreate instances method over HTTP. + + Args: + request (~.compute.RecreateInstancesInstanceGroupManagerRequest): + The request object. A request message for + InstanceGroupManagers.RecreateInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/recreateInstances', + 'body': 'instance_group_managers_recreate_instances_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.RecreateInstancesInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupManagersRecreateInstancesRequest.to_json( + compute.InstanceGroupManagersRecreateInstancesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RecreateInstancesInstanceGroupManagerRequest.to_json( + compute.RecreateInstancesInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _resize(self, + request: compute.ResizeInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the resize method over HTTP. + + Args: + request (~.compute.ResizeInstanceGroupManagerRequest): + The request object. A request message for + InstanceGroupManagers.Resize. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/resize', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "size", + "size" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ResizeInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ResizeInstanceGroupManagerRequest.to_json( + compute.ResizeInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
        # Required fields that carry a proto3 default value can be dropped by
        # the to_json call above; restore them from the transcoded request so
        # the server still receives them as query parameters.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _set_instance_template(self,
            request: compute.SetInstanceTemplateInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the set instance template method over HTTP.

        Args:
            request (~.compute.SetInstanceTemplateInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.SetInstanceTemplate.
                See the method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global <https://cloud.google.com/compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional <https://cloud.google.com/compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal <https://cloud.google.com/compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        # Static HTTP binding for this RPC: POST with the resource message as
        # the JSON request body.
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/setInstanceTemplate',
                'body': 'instance_group_managers_set_instance_template_request_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        # Split the request into URI path params, body and query params
        # according to the http_options binding.
        request_kwargs = compute.SetInstanceTemplateInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.InstanceGroupManagersSetInstanceTemplateRequest.to_json(
            compute.InstanceGroupManagersSetInstanceTemplateRequest(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.SetInstanceTemplateInstanceGroupManagerRequest.to_json(
            compute.SetInstanceTemplateInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _set_target_pools(self,
            request: compute.SetTargetPoolsInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the set target pools method over HTTP.

        Args:
            request (~.compute.SetTargetPoolsInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.SetTargetPools.
                See the method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global <https://cloud.google.com/compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional <https://cloud.google.com/compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal <https://cloud.google.com/compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous
                API requests.
                For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        # Static HTTP binding for this RPC.
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/setTargetPools',
                'body': 'instance_group_managers_set_target_pools_request_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.SetTargetPoolsInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.InstanceGroupManagersSetTargetPoolsRequest.to_json(
            compute.InstanceGroupManagersSetTargetPoolsRequest(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.SetTargetPoolsInstanceGroupManagerRequest.to_json(
            compute.SetTargetPoolsInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _update_per_instance_configs(self,
            request: compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the update per instance
        configs method over HTTP.

        Args:
            request (~.compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest):
                The request object. A request message for
                InstanceGroupManagers.UpdatePerInstanceConfigs.
                See the method description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global <https://cloud.google.com/compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional <https://cloud.google.com/compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal <https://cloud.google.com/compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/updatePerInstanceConfigs',
                'body': 'instance_group_managers_update_per_instance_configs_req_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "instance_group_manager",
                "instanceGroupManager"
            ),
            (
                "project",
                "project"
            ),
            (
                "zone",
                "zone"
            ),
        ]

        request_kwargs = compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body
        body = compute.InstanceGroupManagersUpdatePerInstanceConfigsReq.to_json(
            compute.InstanceGroupManagersUpdatePerInstanceConfigsReq(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest.to_json(
            compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    # The properties below expose each RPC's private handler as a callable;
    # the client layer wraps these with retry/timeout via _wrapped_methods.

    @property
    def abandon_instances(self) -> Callable[
            [compute.AbandonInstancesInstanceGroupManagerRequest],
            compute.Operation]:
        return self._abandon_instances

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListInstanceGroupManagersRequest],
            compute.InstanceGroupManagerAggregatedList]:
        return self._aggregated_list

    @property
    def apply_updates_to_instances(self) -> Callable[
            [compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest],
            compute.Operation]:
        return self._apply_updates_to_instances

    @property
    def create_instances(self) -> Callable[
            [compute.CreateInstancesInstanceGroupManagerRequest],
            compute.Operation]:
        return self._create_instances

    @property
    def delete(self) -> Callable[
            [compute.DeleteInstanceGroupManagerRequest],
            compute.Operation]:
        return self._delete

    @property
    def delete_instances(self) -> Callable[
            [compute.DeleteInstancesInstanceGroupManagerRequest],
            compute.Operation]:
        return self._delete_instances

    @property
    def delete_per_instance_configs(self) -> Callable[
            [compute.DeletePerInstanceConfigsInstanceGroupManagerRequest],
            compute.Operation]:
        return self._delete_per_instance_configs

    @property
    def get(self) -> Callable[
            [compute.GetInstanceGroupManagerRequest],
            compute.InstanceGroupManager]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertInstanceGroupManagerRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListInstanceGroupManagersRequest],
            compute.InstanceGroupManagerList]:
        return self._list

    @property
    def list_errors(self) -> Callable[
            [compute.ListErrorsInstanceGroupManagersRequest],
            compute.InstanceGroupManagersListErrorsResponse]:
        return self._list_errors

    @property
    def list_managed_instances(self) -> Callable[
            [compute.ListManagedInstancesInstanceGroupManagersRequest],
            compute.InstanceGroupManagersListManagedInstancesResponse]:
        return self._list_managed_instances

    @property
    def list_per_instance_configs(self) -> Callable[
            [compute.ListPerInstanceConfigsInstanceGroupManagersRequest],
            compute.InstanceGroupManagersListPerInstanceConfigsResp]:
        return self._list_per_instance_configs

    @property
    def patch(self) -> Callable[
            [compute.PatchInstanceGroupManagerRequest],
            compute.Operation]:
        return self._patch

    @property
    def patch_per_instance_configs(self) -> Callable[
            [compute.PatchPerInstanceConfigsInstanceGroupManagerRequest],
            compute.Operation]:
        return self._patch_per_instance_configs

    @property
    def recreate_instances(self) -> Callable[
            [compute.RecreateInstancesInstanceGroupManagerRequest],
            compute.Operation]:
        return self._recreate_instances

    @property
    def resize(self) -> Callable[
            [compute.ResizeInstanceGroupManagerRequest],
            compute.Operation]:
        return self._resize

    @property
    def set_instance_template(self) -> Callable[
            [compute.SetInstanceTemplateInstanceGroupManagerRequest],
compute.Operation]: + return self._set_instance_template + @ property + def set_target_pools(self) -> Callable[ + [compute.SetTargetPoolsInstanceGroupManagerRequest], + compute.Operation]: + return self._set_target_pools + @ property + def update_per_instance_configs(self) -> Callable[ + [compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest], + compute.Operation]: + return self._update_per_instance_configs + def close(self): + self._session.close() + + +__all__=( + 'InstanceGroupManagersRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/__init__.py new file mode 100644 index 000000000..6b7d2420c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import InstanceGroupsClient + +__all__ = ( + 'InstanceGroupsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/client.py new file mode 100644 index 000000000..87df8f430 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/client.py @@ -0,0 +1,1258 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.instance_groups import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import InstanceGroupsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import InstanceGroupsRestTransport + + +class InstanceGroupsClientMeta(type): + """Metaclass for the InstanceGroups client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[InstanceGroupsTransport]] + _transport_registry["rest"] = InstanceGroupsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[InstanceGroupsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class InstanceGroupsClient(metaclass=InstanceGroupsClientMeta): + """The InstanceGroups API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceGroupsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceGroupsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> InstanceGroupsTransport: + """Returns the transport used by the client instance. + + Returns: + InstanceGroupsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified 
organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, InstanceGroupsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the instance groups client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, InstanceGroupsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict, a ClientOptions, or None.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # An explicit client_cert_source wins over the platform default.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            # No explicit endpoint: GOOGLE_API_USE_MTLS_ENDPOINT decides
            # between the regular and mTLS defaults.
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                if is_mtls:
                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
                else:
                    api_endpoint = self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, InstanceGroupsTransport):
            # transport is a InstanceGroupsTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            # Build the transport from the resolved endpoint / credentials.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def add_instances(self,
            request: Optional[Union[compute.AddInstancesInstanceGroupRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            zone: Optional[str] = None,
            instance_group: Optional[str] = None,
            instance_groups_add_instances_request_resource: Optional[compute.InstanceGroupsAddInstancesRequest] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Adds a list of instances to the specified instance
        group. All of the instances in the instance group must
        be in the same network/subnetwork. Read Adding instances
        for more information.

        Args:
            request (Union[google.cloud.compute_v1.types.AddInstancesInstanceGroupRequest, dict]):
                The request object. A request message for
                InstanceGroups.AddInstances. See the method description
                for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            zone (str):
                The name of the zone where the
                instance group is located.

                This corresponds to the ``zone`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            instance_group (str):
                The name of the instance group where
                you are adding instances.

                This corresponds to the ``instance_group`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            instance_groups_add_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupsAddInstancesRequest):
                The body resource for this request
                This corresponds to the ``instance_groups_add_instances_request_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, zone, instance_group, instance_groups_add_instances_request_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.AddInstancesInstanceGroupRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.AddInstancesInstanceGroupRequest):
            request = compute.AddInstancesInstanceGroupRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project
            if zone is not None:
                request.zone = zone
            if instance_group is not None:
                request.instance_group = instance_group
            if instance_groups_add_instances_request_resource is not None:
                request.instance_groups_add_instances_request_resource = instance_groups_add_instances_request_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.add_instances]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def aggregated_list(self,
            request: Optional[Union[compute.AggregatedListInstanceGroupsRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.AggregatedListPager:
        r"""Retrieves the list of instance groups and sorts them
        by zone.

        Args:
            request (Union[google.cloud.compute_v1.types.AggregatedListInstanceGroupsRequest, dict]):
                The request object. A request message for
                InstanceGroups.AggregatedList. See the method
                description for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.services.instance_groups.pagers.AggregatedListPager:
                Iterating over this object will yield
                results and resolve additional pages
                automatically.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.AggregatedListInstanceGroupsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.AggregatedListInstanceGroupsRequest):
            request = compute.AggregatedListInstanceGroupsRequest(request)
            # If we have keyword arguments corresponding to fields on the
            # request, apply these.
            if project is not None:
                request.project = project

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.aggregated_list]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.AggregatedListPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
+ return response + + def delete(self, + request: Union[compute.DeleteInstanceGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified instance group. The instances + in the group are not deleted. Note that instance group + must not belong to a backend service. Read Deleting an + instance group for more information. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInstanceGroupRequest, dict]): + The request object. A request message for + InstanceGroups.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group (str): + The name of the instance group to + delete. + + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInstanceGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteInstanceGroupRequest): + request = compute.DeleteInstanceGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group is not None: + request.instance_group = instance_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetInstanceGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InstanceGroup: + r"""Returns the specified zonal instance group. Get a + list of available zonal instance groups by making a + list() request. For managed instance groups, use the + instanceGroupManagers or regionInstanceGroupManagers + methods instead. + + Args: + request (Union[google.cloud.compute_v1.types.GetInstanceGroupRequest, dict]): + The request object. A request message for + InstanceGroups.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group (str): + The name of the instance group. + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.InstanceGroup: + Represents an Instance Group + resource. Instance Groups can be used to + configure a target for load balancing. + Instance groups can either be managed or + unmanaged. 
To create managed instance + groups, use the instanceGroupManager or + regionInstanceGroupManager resource + instead. Use zonal unmanaged instance + groups if you need to apply load + balancing to groups of heterogeneous + instances or if you need to manage the + instances yourself. You cannot create + regional unmanaged instance groups. For + more information, read Instance groups. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetInstanceGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetInstanceGroupRequest): + request = compute.GetInstanceGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group is not None: + request.instance_group = instance_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertInstanceGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group_resource: compute.InstanceGroup = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates an instance group in the specified project + using the parameters that are included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertInstanceGroupRequest, dict]): + The request object. A request message for + InstanceGroups.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where you want + to create the instance group. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_resource (google.cloud.compute_v1.types.InstanceGroup): + The body resource for this request + This corresponds to the ``instance_group_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertInstanceGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertInstanceGroupRequest): + request = compute.InsertInstanceGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group_resource is not None: + request.instance_group_resource = instance_group_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list(self, + request: Union[compute.ListInstanceGroupsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of zonal instance group resources + contained within the specified zone. For managed + instance groups, use the instanceGroupManagers or + regionInstanceGroupManagers methods instead. + + Args: + request (Union[google.cloud.compute_v1.types.ListInstanceGroupsRequest, dict]): + The request object. A request message for + InstanceGroups.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instance_groups.pagers.ListPager: + A list of InstanceGroup resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInstanceGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInstanceGroupsRequest): + request = compute.ListInstanceGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_instances(self, + request: Union[compute.ListInstancesInstanceGroupsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group: str = None, + instance_groups_list_instances_request_resource: compute.InstanceGroupsListInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListInstancesPager: + r"""Lists the instances in the specified instance group. + The orderBy query parameter is not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListInstancesInstanceGroupsRequest, dict]): + The request object. 
A request message for + InstanceGroups.ListInstances. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group (str): + The name of the instance group from + which you want to generate a list of + included instances. + + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_groups_list_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupsListInstancesRequest): + The body resource for this request + This corresponds to the ``instance_groups_list_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instance_groups.pagers.ListInstancesPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance_group, instance_groups_list_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInstancesInstanceGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInstancesInstanceGroupsRequest): + request = compute.ListInstancesInstanceGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group is not None: + request.instance_group = instance_group + if instance_groups_list_instances_request_resource is not None: + request.instance_groups_list_instances_request_resource = instance_groups_list_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListInstancesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def remove_instances(self, + request: Union[compute.RemoveInstancesInstanceGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group: str = None, + instance_groups_remove_instances_request_resource: compute.InstanceGroupsRemoveInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Removes one or more instances from the specified + instance group, but does not delete those instances. If + the group is part of a backend service that has enabled + connection draining, it can take up to 60 seconds after + the connection draining duration before the VM instance + is removed or deleted. + + Args: + request (Union[google.cloud.compute_v1.types.RemoveInstancesInstanceGroupRequest, dict]): + The request object. A request message for + InstanceGroups.RemoveInstances. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group (str): + The name of the instance group where + the specified instances will be removed. + + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_groups_remove_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupsRemoveInstancesRequest): + The body resource for this request + This corresponds to the ``instance_groups_remove_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance_group, instance_groups_remove_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemoveInstancesInstanceGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemoveInstancesInstanceGroupRequest): + request = compute.RemoveInstancesInstanceGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group is not None: + request.instance_group = instance_group + if instance_groups_remove_instances_request_resource is not None: + request.instance_groups_remove_instances_request_resource = instance_groups_remove_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.remove_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_named_ports(self, + request: Union[compute.SetNamedPortsInstanceGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_group: str = None, + instance_groups_set_named_ports_request_resource: compute.InstanceGroupsSetNamedPortsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the named ports for the specified instance + group. + + Args: + request (Union[google.cloud.compute_v1.types.SetNamedPortsInstanceGroupRequest, dict]): + The request object. A request message for + InstanceGroups.SetNamedPorts. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + instance group is located. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group (str): + The name of the instance group where + the named ports are updated. 
+ + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_groups_set_named_ports_request_resource (google.cloud.compute_v1.types.InstanceGroupsSetNamedPortsRequest): + The body resource for this request + This corresponds to the ``instance_groups_set_named_ports_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance_group, instance_groups_set_named_ports_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetNamedPortsInstanceGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetNamedPortsInstanceGroupRequest): + request = compute.SetNamedPortsInstanceGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_group is not None: + request.instance_group = instance_group + if instance_groups_set_named_ports_request_resource is not None: + request.instance_groups_set_named_ports_request_resource = instance_groups_set_named_ports_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_named_ports] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "InstanceGroupsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/pagers.py new file mode 100644 index 000000000..a707a2559 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/pagers.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceGroupAggregatedList], + request: compute.AggregatedListInstanceGroupsRequest, + response: compute.InstanceGroupAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListInstanceGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceGroupAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListInstanceGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.InstanceGroupsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.InstanceGroupsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceGroupList], + request: compute.ListInstanceGroupsRequest, + response: compute.InstanceGroupList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInstanceGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceGroupList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListInstanceGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceGroup]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListInstancesPager: + """A pager for iterating through ``list_instances`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceGroupsListInstances` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListInstances`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceGroupsListInstances` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceGroupsListInstances], + request: compute.ListInstancesInstanceGroupsRequest, + response: compute.InstanceGroupsListInstances, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInstancesInstanceGroupsRequest): + The initial request object. 
+ response (google.cloud.compute_v1.types.InstanceGroupsListInstances): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListInstancesInstanceGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceGroupsListInstances]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceWithNamedPorts]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/__init__.py new file mode 100644 index 000000000..645607299 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import InstanceGroupsTransport +from .rest import InstanceGroupsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[InstanceGroupsTransport]] +_transport_registry['rest'] = InstanceGroupsRestTransport + +__all__ = ( + 'InstanceGroupsTransport', + 'InstanceGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/base.py new file mode 100644 index 000000000..4acc68275 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/base.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class InstanceGroupsTransport(abc.ABC): + """Abstract transport class for InstanceGroups.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.add_instances: gapic_v1.method.wrap_method( + self.add_instances, + default_timeout=None, + client_info=client_info, + ), + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_instances: gapic_v1.method.wrap_method( + self.list_instances, + default_timeout=None, + client_info=client_info, + ), + self.remove_instances: gapic_v1.method.wrap_method( + self.remove_instances, + default_timeout=None, + client_info=client_info, + ), + self.set_named_ports: gapic_v1.method.wrap_method( + self.set_named_ports, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def add_instances(self) -> Callable[ + [compute.AddInstancesInstanceGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListInstanceGroupsRequest], + Union[ + compute.InstanceGroupAggregatedList, + Awaitable[compute.InstanceGroupAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteInstanceGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetInstanceGroupRequest], + Union[ + compute.InstanceGroup, + Awaitable[compute.InstanceGroup] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertInstanceGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListInstanceGroupsRequest], + Union[ + compute.InstanceGroupList, + Awaitable[compute.InstanceGroupList] + ]]: + raise NotImplementedError() + + @property + def list_instances(self) -> Callable[ + [compute.ListInstancesInstanceGroupsRequest], + Union[ + compute.InstanceGroupsListInstances, + Awaitable[compute.InstanceGroupsListInstances] + ]]: + raise NotImplementedError() + + @property + def remove_instances(self) -> Callable[ + [compute.RemoveInstancesInstanceGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_named_ports(self) -> Callable[ + [compute.SetNamedPortsInstanceGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'InstanceGroupsTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/rest.py new file mode 100644 index 000000000..c5aa5887a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_groups/transports/rest.py @@ -0,0 +1,1126 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import InstanceGroupsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class InstanceGroupsRestTransport(InstanceGroupsTransport): + """REST backend transport for InstanceGroups. + + The InstanceGroups API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_instances(self, + request: compute.AddInstancesInstanceGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add instances method over HTTP. + + Args: + request (~.compute.AddInstancesInstanceGroupRequest): + The request object. A request message for + InstanceGroups.AddInstances. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+
+        """
+
+        http_options = [
+            {
+                'method': 'post',
+                'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/addInstances',
+                'body': 'instance_groups_add_instances_request_resource',
+            },
+        ]
+
+        required_fields = [
+            # (snake_case_name, camel_case_name)
+            (
+                "instance_group",
+                "instanceGroup"
+            ),
+            (
+                "project",
+                "project"
+            ),
+            (
+                "zone",
+                "zone"
+            ),
+        ]
+
+        request_kwargs = compute.AddInstancesInstanceGroupRequest.to_dict(request)
+        transcoded_request = path_template.transcode(
+            http_options, **request_kwargs)
+
+        # Jsonify the request body
+        body = compute.InstanceGroupsAddInstancesRequest.to_json(
+            compute.InstanceGroupsAddInstancesRequest(
+                transcoded_request['body']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        )
+        uri = transcoded_request['uri']
+        method = transcoded_request['method']
+
+        # Jsonify the query params
+        query_params = json.loads(compute.AddInstancesInstanceGroupRequest.to_json(
+            compute.AddInstancesInstanceGroupRequest(transcoded_request['query_params']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        ))
+
+        # Ensure required fields have values in query_params.
+        # If a required field has a default value, it can get lost
+        # by the to_json call above.
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _aggregated_list(self, + request: compute.AggregatedListInstanceGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListInstanceGroupsRequest): + The request object. A request message for + InstanceGroups.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.InstanceGroupAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/instanceGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListInstanceGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListInstanceGroupsRequest.to_json( + compute.AggregatedListInstanceGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.InstanceGroupAggregatedList.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _delete(self,
+            request: compute.DeleteInstanceGroupRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the delete method over HTTP.
+
+        Args:
+            request (~.compute.DeleteInstanceGroupRequest):
+                The request object. A request message for
+                InstanceGroups.Delete. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group", + "instanceGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteInstanceGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteInstanceGroupRequest.to_json( + compute.DeleteInstanceGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetInstanceGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroup: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetInstanceGroupRequest): + The request object. A request message for + InstanceGroups.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InstanceGroup: + Represents an Instance Group + resource. Instance Groups can be used to + configure a target for load balancing. + Instance groups can either be managed or + unmanaged. To create managed instance + groups, use the instanceGroupManager or + regionInstanceGroupManager resource + instead. Use zonal unmanaged instance + groups if you need to apply load + balancing to groups of heterogeneous + instances or if you need to manage the + instances yourself. You cannot create + regional unmanaged instance groups. For + more information, read Instance groups. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group", + "instanceGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetInstanceGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetInstanceGroupRequest.to_json( + compute.GetInstanceGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.InstanceGroup.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _insert(self,
+            request: compute.InsertInstanceGroupRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the insert method over HTTP.
+
+        Args:
+            request (~.compute.InsertInstanceGroupRequest):
+                The request object. A request message for
+                InstanceGroups.Insert. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups', + 'body': 'instance_group_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertInstanceGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroup.to_json( + compute.InstanceGroup( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertInstanceGroupRequest.to_json( + compute.InsertInstanceGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListInstanceGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListInstanceGroupsRequest): + The request object. A request message for + InstanceGroups.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InstanceGroupList: + A list of InstanceGroup resources. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListInstanceGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListInstanceGroupsRequest.to_json( + compute.ListInstanceGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceGroupList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_instances(self, + request: compute.ListInstancesInstanceGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupsListInstances: + r"""Call the list instances method over HTTP. + + Args: + request (~.compute.ListInstancesInstanceGroupsRequest): + The request object. A request message for + InstanceGroups.ListInstances. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.InstanceGroupsListInstances: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/listInstances', + 'body': 'instance_groups_list_instances_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group", + "instanceGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListInstancesInstanceGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupsListInstancesRequest.to_json( + compute.InstanceGroupsListInstancesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListInstancesInstanceGroupsRequest.to_json( + compute.ListInstancesInstanceGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        headers = dict(metadata)
+        headers['Content-Type'] = 'application/json'
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+            data=body,
+            )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.InstanceGroupsListInstances.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _remove_instances(self,
+            request: compute.RemoveInstancesInstanceGroupRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the remove instances method over HTTP.
+
+        Args:
+            request (~.compute.RemoveInstancesInstanceGroupRequest):
+                The request object. A request message for
+                InstanceGroups.RemoveInstances. See the
+                method description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/removeInstances', + 'body': 'instance_groups_remove_instances_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group", + "instanceGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.RemoveInstancesInstanceGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupsRemoveInstancesRequest.to_json( + compute.InstanceGroupsRemoveInstancesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveInstancesInstanceGroupRequest.to_json( + compute.RemoveInstancesInstanceGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        headers = dict(metadata)
+        headers['Content-Type'] = 'application/json'
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+            data=body,
+            )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.Operation.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _set_named_ports(self,
+            request: compute.SetNamedPortsInstanceGroupRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the set named ports method over HTTP.
+
+        Args:
+            request (~.compute.SetNamedPortsInstanceGroupRequest):
+                The request object. A request message for
+                InstanceGroups.SetNamedPorts. See the
+                method description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/setNamedPorts', + 'body': 'instance_groups_set_named_ports_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group", + "instanceGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetNamedPortsInstanceGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupsSetNamedPortsRequest.to_json( + compute.InstanceGroupsSetNamedPortsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetNamedPortsInstanceGroupRequest.to_json( + compute.SetNamedPortsInstanceGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_instances(self) -> Callable[ + [compute.AddInstancesInstanceGroupRequest], + compute.Operation]: + return self._add_instances + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListInstanceGroupsRequest], + compute.InstanceGroupAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteInstanceGroupRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetInstanceGroupRequest], + compute.InstanceGroup]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertInstanceGroupRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListInstanceGroupsRequest], + compute.InstanceGroupList]: + return self._list + @ property + def list_instances(self) -> Callable[ + [compute.ListInstancesInstanceGroupsRequest], + compute.InstanceGroupsListInstances]: + return self._list_instances + @ property + def 
remove_instances(self) -> Callable[ + [compute.RemoveInstancesInstanceGroupRequest], + compute.Operation]: + return self._remove_instances + @ property + def set_named_ports(self) -> Callable[ + [compute.SetNamedPortsInstanceGroupRequest], + compute.Operation]: + return self._set_named_ports + def close(self): + self._session.close() + + +__all__=( + 'InstanceGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/__init__.py new file mode 100644 index 000000000..3f86bfc03 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import InstanceTemplatesClient + +__all__ = ( + 'InstanceTemplatesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/client.py new file mode 100644 index 000000000..1dd5b7f5f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/client.py @@ -0,0 +1,1015 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.instance_templates import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import InstanceTemplatesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import InstanceTemplatesRestTransport + + +class InstanceTemplatesClientMeta(type): + """Metaclass for the InstanceTemplates client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[InstanceTemplatesTransport]] + _transport_registry["rest"] = InstanceTemplatesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[InstanceTemplatesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class InstanceTemplatesClient(metaclass=InstanceTemplatesClientMeta): + """The InstanceTemplates API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceTemplatesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstanceTemplatesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> InstanceTemplatesTransport: + """Returns the transport used by the client instance. + + Returns: + InstanceTemplatesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, InstanceTemplatesTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the instance templates client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, InstanceTemplatesTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, InstanceTemplatesTransport): + # transport is a InstanceTemplatesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteInstanceTemplateRequest, dict] = None, + *, + project: str = None, + instance_template: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified instance template. Deleting an + instance template is permanent and cannot be undone. It + is not possible to delete templates that are already in + use by a managed instance group. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInstanceTemplateRequest, dict]): + The request object. A request message for + InstanceTemplates.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instance_template (str): + The name of the instance template to + delete. + + This corresponds to the ``instance_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, instance_template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInstanceTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteInstanceTemplateRequest): + request = compute.DeleteInstanceTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if instance_template is not None: + request.instance_template = instance_template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetInstanceTemplateRequest, dict] = None, + *, + project: str = None, + instance_template: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InstanceTemplate: + r"""Returns the specified instance template. Gets a list + of available instance templates by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetInstanceTemplateRequest, dict]): + The request object. A request message for + InstanceTemplates.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_template (str): + The name of the instance template. + This corresponds to the ``instance_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.InstanceTemplate: + Represents an Instance Template + resource. You can use instance templates + to create VM instances and managed + instance groups. For more information, + read Instance Templates. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, instance_template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetInstanceTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetInstanceTemplateRequest): + request = compute.GetInstanceTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if instance_template is not None: + request.instance_template = instance_template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyInstanceTemplateRequest, dict] = None, + *, + project: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. 
+ + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyInstanceTemplateRequest, dict]): + The request object. A request message for + InstanceTemplates.GetIamPolicy. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyInstanceTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicyInstanceTemplateRequest): + request = compute.GetIamPolicyInstanceTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertInstanceTemplateRequest, dict] = None, + *, + project: str = None, + instance_template_resource: compute.InstanceTemplate = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates an instance template in the specified project + using the data that is included in the request. If you + are creating a new template to update an existing + instance group, your new instance template must use the + same network or, if applicable, the same subnetwork as + the original template. + + Args: + request (Union[google.cloud.compute_v1.types.InsertInstanceTemplateRequest, dict]): + The request object. A request message for + InstanceTemplates.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_template_resource (google.cloud.compute_v1.types.InstanceTemplate): + The body resource for this request + This corresponds to the ``instance_template_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, instance_template_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertInstanceTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertInstanceTemplateRequest): + request = compute.InsertInstanceTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if instance_template_resource is not None: + request.instance_template_resource = instance_template_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListInstanceTemplatesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of instance templates that are + contained within the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListInstanceTemplatesRequest, dict]): + The request object. A request message for + InstanceTemplates.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instance_templates.pagers.ListPager: + A list of instance templates. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInstanceTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInstanceTemplatesRequest): + request = compute.ListInstanceTemplatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyInstanceTemplateRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_policy_request_resource: compute.GlobalSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyInstanceTemplateRequest, dict]): + The request object. A request message for + InstanceTemplates.SetIamPolicy. See the method + description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + This corresponds to the ``global_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, global_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyInstanceTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetIamPolicyInstanceTemplateRequest): + request = compute.SetIamPolicyInstanceTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_policy_request_resource is not None: + request.global_set_policy_request_resource = global_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsInstanceTemplateRequest, dict] = None, + *, + project: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsInstanceTemplateRequest, dict]): + The request object. A request message for + InstanceTemplates.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsInstanceTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsInstanceTemplateRequest): + request = compute.TestIamPermissionsInstanceTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "InstanceTemplatesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/pagers.py new file mode 100644 index 000000000..fd3fa821d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceTemplateList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceTemplateList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceTemplateList], + request: compute.ListInstanceTemplatesRequest, + response: compute.InstanceTemplateList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInstanceTemplatesRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceTemplateList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListInstanceTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceTemplateList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceTemplate]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/__init__.py new file mode 100644 index 000000000..2c4097e9c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import InstanceTemplatesTransport +from .rest import InstanceTemplatesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[InstanceTemplatesTransport]] +_transport_registry['rest'] = InstanceTemplatesRestTransport + +__all__ = ( + 'InstanceTemplatesTransport', + 'InstanceTemplatesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/base.py new file mode 100644 index 000000000..8fb2c243d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/base.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import abc
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import pkg_resources
+
+import google.auth  # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.compute_v1.types import compute
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            'google-cloud-compute',
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:  # package not pip-installed (e.g. generated code run from source)
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class InstanceTemplatesTransport(abc.ABC):
+    """Abstract transport class for InstanceTemplates."""
+
+    AUTH_SCOPES = (  # OAuth scopes used when building default credentials below
+        'https://www.googleapis.com/auth/compute',
+        'https://www.googleapis.com/auth/cloud-platform',
+    )
+
+    DEFAULT_HOST: str = 'compute.googleapis.com'  # port :443 is appended in __init__ when absent
+    def __init__(
+            self, *,
+            host: str = DEFAULT_HOST,
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteInstanceTemplateRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetInstanceTemplateRequest], + Union[ + compute.InstanceTemplate, + Awaitable[compute.InstanceTemplate] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyInstanceTemplateRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertInstanceTemplateRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListInstanceTemplatesRequest], + Union[ + compute.InstanceTemplateList, + Awaitable[compute.InstanceTemplateList] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyInstanceTemplateRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsInstanceTemplateRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'InstanceTemplatesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/rest.py new file mode 100644 index 000000000..8b0c3556d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instance_templates/transports/rest.py @@ -0,0 +1,918 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from 
google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import InstanceTemplatesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class InstanceTemplatesRestTransport(InstanceTemplatesTransport): + """REST backend transport for InstanceTemplates. + + The InstanceTemplates API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteInstanceTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteInstanceTemplateRequest): + The request object. A request message for + InstanceTemplates.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+
+        """
+
+        # HTTP mapping for this RPC. DELETE carries no request body, so there
+        # is no 'body' key here (contrast with _insert / _set_iam_policy).
+        http_options = [
+            {
+                'method': 'delete',
+                'uri': '/compute/v1/projects/{project}/global/instanceTemplates/{instance_template}',
+            },
+        ]
+
+        # Required fields whose values must survive JSON serialization; they
+        # are re-injected into query_params below if to_json drops them.
+        required_fields = [
+            # (snake_case_name, camel_case_name)
+            (
+                "instance_template",
+                "instanceTemplate"
+            ),
+            (
+                "project",
+                "project"
+            ),
+        ]
+
+        # Split the request into URI path, HTTP verb and query params per
+        # http_options above.
+        request_kwargs = compute.DeleteInstanceTemplateRequest.to_dict(request)
+        transcoded_request = path_template.transcode(
+            http_options, **request_kwargs)
+
+        uri = transcoded_request['uri']
+        method = transcoded_request['method']
+
+        # Jsonify the query params
+        query_params = json.loads(compute.DeleteInstanceTemplateRequest.to_json(
+            compute.DeleteInstanceTemplateRequest(transcoded_request['query_params']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        ))
+
+        # Ensure required fields have values in query_params.
+        # If a required field has a default value, it can get lost
+        # by the to_json call above.
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        # NOTE(review): the ``retry`` parameter is accepted by this method but
+        # is not applied here — presumably retries are handled by the wrapped
+        # method layer (_prep_wrapped_messages); confirm before relying on it.
+        headers = dict(metadata)
+        # Content-Type is set unconditionally even though this DELETE sends no body.
+        headers['Content-Type'] = 'application/json'
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+        )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetInstanceTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceTemplate: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetInstanceTemplateRequest): + The request object. A request message for + InstanceTemplates.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InstanceTemplate: + Represents an Instance Template + resource. You can use instance templates + to create VM instances and managed + instance groups. For more information, + read Instance Templates. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/instanceTemplates/{instance_template}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_template", + "instanceTemplate" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetInstanceTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetInstanceTemplateRequest.to_json( + compute.GetInstanceTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceTemplate.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyInstanceTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyInstanceTemplateRequest): + The request object. A request message for + InstanceTemplates.GetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/instanceTemplates/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyInstanceTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyInstanceTemplateRequest.to_json( + compute.GetIamPolicyInstanceTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertInstanceTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertInstanceTemplateRequest): + The request object. A request message for + InstanceTemplates.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/instanceTemplates', + 'body': 'instance_template_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertInstanceTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceTemplate.to_json( + compute.InstanceTemplate( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertInstanceTemplateRequest.to_json( + compute.InsertInstanceTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.Operation.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _list(self,
+            request: compute.ListInstanceTemplatesRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.InstanceTemplateList:
+        r"""Call the list method over HTTP.
+
+        Args:
+            request (~.compute.ListInstanceTemplatesRequest):
+                The request object. A request message for
+                InstanceTemplates.List. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.InstanceTemplateList:
+                A list of instance templates.
+        """
+
+        # HTTP mapping for this RPC: a plain GET on the collection URI.
+        http_options = [
+            {
+                'method': 'get',
+                'uri': '/compute/v1/projects/{project}/global/instanceTemplates',
+            },
+        ]
+
+        # Required fields re-injected into query_params below if to_json
+        # drops them (defaults are omitted by the serializer).
+        required_fields = [
+            # (snake_case_name, camel_case_name)
+            (
+                "project",
+                "project"
+            ),
+        ]
+
+        request_kwargs = compute.ListInstanceTemplatesRequest.to_dict(request)
+        transcoded_request = path_template.transcode(
+            http_options, **request_kwargs)
+
+        uri = transcoded_request['uri']
+        method = transcoded_request['method']
+
+        # Jsonify the query params
+        query_params = json.loads(compute.ListInstanceTemplatesRequest.to_json(
+            compute.ListInstanceTemplatesRequest(transcoded_request['query_params']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        ))
+
+        # Ensure required fields have values in query_params.
+        # If a required field has a default value, it can get lost
+        # by the to_json call above.
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceTemplateList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyInstanceTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyInstanceTemplateRequest): + The request object. A request message for + InstanceTemplates.SetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/instanceTemplates/{resource}/setIamPolicy', + 'body': 'global_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyInstanceTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetPolicyRequest.to_json( + compute.GlobalSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyInstanceTemplateRequest.to_json( + compute.SetIamPolicyInstanceTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsInstanceTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsInstanceTemplateRequest): + The request object. A request message for + InstanceTemplates.TestIamPermissions. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/instanceTemplates/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsInstanceTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsInstanceTemplateRequest.to_json( + 
compute.TestIamPermissionsInstanceTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.TestPermissionsResponse.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    # Public accessors for the private HTTP implementations above. These
+    # override the base-class properties, which raise NotImplementedError.
+    # NOTE(review): '@ property' (with a space after '@') is valid Python but
+    # unconventional; generator artifact.
+    @ property
+    def delete(self) -> Callable[
+            [compute.DeleteInstanceTemplateRequest],
+            compute.Operation]:
+        return self._delete
+    @ property
+    def get(self) -> Callable[
+            [compute.GetInstanceTemplateRequest],
+            compute.InstanceTemplate]:
+        return self._get
+    @ property
+    def get_iam_policy(self) -> Callable[
+            [compute.GetIamPolicyInstanceTemplateRequest],
+            compute.Policy]:
+        return self._get_iam_policy
+    @ property
+    def insert(self) -> Callable[
+            [compute.InsertInstanceTemplateRequest],
+            compute.Operation]:
+        return self._insert
+    @ property
+    def list(self) -> Callable[
+            [compute.ListInstanceTemplatesRequest],
+            compute.InstanceTemplateList]:
+        return self._list
+    @ property
+    def set_iam_policy(self) -> Callable[
+            [compute.SetIamPolicyInstanceTemplateRequest],
+            compute.Policy]:
+        return self._set_iam_policy
+    @ property
+    def test_iam_permissions(self) -> Callable[
+            [compute.TestIamPermissionsInstanceTemplateRequest],
+            compute.TestPermissionsResponse]:
+        return self._test_iam_permissions
+    def close(self):
+        # Close the underlying authorized requests session.
+        self._session.close()
+
+
+__all__=(
+    'InstanceTemplatesRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/__init__.py
new file mode 100644
index 000000000..51aa24b56
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import InstancesClient + +__all__ = ( + 'InstancesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/client.py new file mode 100644 index 000000000..e88049f3f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/client.py @@ -0,0 +1,4819 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.instances import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import InstancesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import InstancesRestTransport + + +class InstancesClientMeta(type): + """Metaclass for the Instances client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[InstancesTransport]] + _transport_registry["rest"] = InstancesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[InstancesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class InstancesClient(metaclass=InstancesClientMeta): + """The Instances API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InstancesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + InstancesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> InstancesTransport: + """Returns the transport used by the client instance. + + Returns: + InstancesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return 
"projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, InstancesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the instances client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, InstancesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, InstancesTransport): + # transport is a InstancesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_access_config(self, + request: Union[compute.AddAccessConfigInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + network_interface: str = None, + access_config_resource: compute.AccessConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds an access config to an instance's network + interface. + + Args: + request (Union[google.cloud.compute_v1.types.AddAccessConfigInstanceRequest, dict]): + The request object. A request message for + Instances.AddAccessConfig. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_interface (str): + The name of the network interface to + add to this instance. + + This corresponds to the ``network_interface`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ access_config_resource (google.cloud.compute_v1.types.AccessConfig): + The body resource for this request + This corresponds to the ``access_config_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, network_interface, access_config_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddAccessConfigInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.AddAccessConfigInstanceRequest): + request = compute.AddAccessConfigInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if network_interface is not None: + request.network_interface = network_interface + if access_config_resource is not None: + request.access_config_resource = access_config_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_access_config] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_resource_policies(self, + request: Union[compute.AddResourcePoliciesInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_add_resource_policies_request_resource: compute.InstancesAddResourcePoliciesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds existing resource policies to an instance. You + can only add one policy right now which will be applied + to this instance for scheduling live migrations. + + Args: + request (Union[google.cloud.compute_v1.types.AddResourcePoliciesInstanceRequest, dict]): + The request object. A request message for + Instances.AddResourcePolicies. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances_add_resource_policies_request_resource (google.cloud.compute_v1.types.InstancesAddResourcePoliciesRequest): + The body resource for this request + This corresponds to the ``instances_add_resource_policies_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance, instances_add_resource_policies_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddResourcePoliciesInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddResourcePoliciesInstanceRequest): + request = compute.AddResourcePoliciesInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instances_add_resource_policies_request_resource is not None: + request.instances_add_resource_policies_request_resource = instances_add_resource_policies_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_resource_policies] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def aggregated_list(self, + request: Union[compute.AggregatedListInstancesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of all of the instances + in your project across all regions and zones. The + performance of this method degrades when a filter is + specified on a project that has a very large number of + instances. 
+ + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListInstancesRequest, dict]): + The request object. A request message for + Instances.AggregatedList. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instances.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListInstancesRequest): + request = compute.AggregatedListInstancesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def attach_disk(self, + request: Union[compute.AttachDiskInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + attached_disk_resource: compute.AttachedDisk = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Attaches an existing Disk resource to an instance. + You must first create the disk before you can attach it. + It is not possible to create and attach a disk at the + same time. For more information, read Adding a + persistent disk to your instance. + + Args: + request (Union[google.cloud.compute_v1.types.AttachDiskInstanceRequest, dict]): + The request object. A request message for + Instances.AttachDisk. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + attached_disk_resource (google.cloud.compute_v1.types.AttachedDisk): + The body resource for this request + This corresponds to the ``attached_disk_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, attached_disk_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AttachDiskInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AttachDiskInstanceRequest): + request = compute.AttachDiskInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if attached_disk_resource is not None: + request.attached_disk_resource = attached_disk_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.attach_disk] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def bulk_insert(self, + request: Union[compute.BulkInsertInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + bulk_insert_instance_resource_resource: compute.BulkInsertInstanceResource = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates multiple instances. Count specifies the + number of instances to create. + + Args: + request (Union[google.cloud.compute_v1.types.BulkInsertInstanceRequest, dict]): + The request object. A request message for + Instances.BulkInsert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + bulk_insert_instance_resource_resource (google.cloud.compute_v1.types.BulkInsertInstanceResource): + The body resource for this request + This corresponds to the ``bulk_insert_instance_resource_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, bulk_insert_instance_resource_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.BulkInsertInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.BulkInsertInstanceRequest): + request = compute.BulkInsertInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if bulk_insert_instance_resource_resource is not None: + request.bulk_insert_instance_resource_resource = bulk_insert_instance_resource_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.bulk_insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified Instance resource. For more + information, see Deleting an instance. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInstanceRequest, dict]): + The request object. A request message for + Instances.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance resource to + delete. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteInstanceRequest): + request = compute.DeleteInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_access_config(self, + request: Union[compute.DeleteAccessConfigInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + access_config: str = None, + network_interface: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes an access config from an instance's network + interface. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteAccessConfigInstanceRequest, dict]): + The request object. A request message for + Instances.DeleteAccessConfig. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + access_config (str): + The name of the access config to + delete. + + This corresponds to the ``access_config`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_interface (str): + The name of the network interface. + This corresponds to the ``network_interface`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, access_config, network_interface]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteAccessConfigInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteAccessConfigInstanceRequest): + request = compute.DeleteAccessConfigInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if access_config is not None: + request.access_config = access_config + if network_interface is not None: + request.network_interface = network_interface + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_access_config] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def detach_disk(self, + request: Union[compute.DetachDiskInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + device_name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Detaches a disk from an instance. + + Args: + request (Union[google.cloud.compute_v1.types.DetachDiskInstanceRequest, dict]): + The request object. A request message for + Instances.DetachDisk. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + device_name (str): + The device name of the disk to + detach. Make a get() request on the + instance to view currently attached + disks and device names. 
+ + This corresponds to the ``device_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, device_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DetachDiskInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DetachDiskInstanceRequest): + request = compute.DetachDiskInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if device_name is not None: + request.device_name = device_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.detach_disk] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Instance: + r"""Returns the specified Instance resource. Gets a list + of available instances by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetInstanceRequest, dict]): + The request object. A request message for Instances.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance resource to + return. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Instance: + Represents an Instance resource. An + instance is a virtual machine that is + hosted on Google Cloud Platform. For + more information, read Virtual Machine + Instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetInstanceRequest): + request = compute.GetInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_effective_firewalls(self, + request: Union[compute.GetEffectiveFirewallsInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + network_interface: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InstancesGetEffectiveFirewallsResponse: + r"""Returns effective firewalls applied to an interface + of the instance. + + Args: + request (Union[google.cloud.compute_v1.types.GetEffectiveFirewallsInstanceRequest, dict]): + The request object. A request message for + Instances.GetEffectiveFirewalls. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_interface (str): + The name of the network interface to + get the effective firewalls. + + This corresponds to the ``network_interface`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.InstancesGetEffectiveFirewallsResponse: + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, network_interface]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetEffectiveFirewallsInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetEffectiveFirewallsInstanceRequest): + request = compute.GetEffectiveFirewallsInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if network_interface is not None: + request.network_interface = network_interface + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_effective_firewalls] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_guest_attributes(self, + request: Union[compute.GetGuestAttributesInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.GuestAttributes: + r"""Returns the specified guest attributes entry. + + Args: + request (Union[google.cloud.compute_v1.types.GetGuestAttributesInstanceRequest, dict]): + The request object. A request message for + Instances.GetGuestAttributes. 
See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.GuestAttributes: + A guest attributes entry. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetGuestAttributesInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetGuestAttributesInstanceRequest): + request = compute.GetGuestAttributesInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_guest_attributes] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyInstanceRequest, dict]): + The request object. A request message for + Instances.GetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 
3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetIamPolicyInstanceRequest): + request = compute.GetIamPolicyInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_screenshot(self, + request: Union[compute.GetScreenshotInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Screenshot: + r"""Returns the screenshot from the specified instance. + + Args: + request (Union[google.cloud.compute_v1.types.GetScreenshotInstanceRequest, dict]): + The request object. A request message for + Instances.GetScreenshot. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Screenshot: + An instance's screenshot. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetScreenshotInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetScreenshotInstanceRequest): + request = compute.GetScreenshotInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_screenshot] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_serial_port_output(self, + request: Union[compute.GetSerialPortOutputInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SerialPortOutput: + r"""Returns the last 1 MB of serial port output from the + specified instance. + + Args: + request (Union[google.cloud.compute_v1.types.GetSerialPortOutputInstanceRequest, dict]): + The request object. A request message for + Instances.GetSerialPortOutput. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance for this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.SerialPortOutput: + An instance serial console output. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetSerialPortOutputInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetSerialPortOutputInstanceRequest): + request = compute.GetSerialPortOutputInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_serial_port_output] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_shielded_instance_identity(self, + request: Union[compute.GetShieldedInstanceIdentityInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.ShieldedInstanceIdentity: + r"""Returns the Shielded Instance Identity of an instance + + Args: + request (Union[google.cloud.compute_v1.types.GetShieldedInstanceIdentityInstanceRequest, dict]): + The request object. A request message for + Instances.GetShieldedInstanceIdentity. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name or id of the instance scoping + this request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.ShieldedInstanceIdentity: + A Shielded Instance Identity. + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetShieldedInstanceIdentityInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetShieldedInstanceIdentityInstanceRequest): + request = compute.GetShieldedInstanceIdentityInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_shielded_instance_identity] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance_resource: compute.Instance = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates an instance resource in the specified project + using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertInstanceRequest, dict]): + The request object. A request message for + Instances.Insert. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_resource (google.cloud.compute_v1.types.Instance): + The body resource for this request + This corresponds to the ``instance_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertInstanceRequest): + request = compute.InsertInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance_resource is not None: + request.instance_resource = instance_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListInstancesRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of instances contained within the + specified zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListInstancesRequest, dict]): + The request object. A request message for + Instances.List. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.instances.pagers.ListPager: + Contains a list of instances. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInstancesRequest): + request = compute.ListInstancesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_referrers(self, + request: Union[compute.ListReferrersInstancesRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListReferrersPager: + r"""Retrieves a list of resources that refer to the VM + instance specified in the request. For example, if the + VM instance is part of a managed or unmanaged instance + group, the referrers list includes the instance group. + For more information, read Viewing referrers to VM + instances. + + Args: + request (Union[google.cloud.compute_v1.types.ListReferrersInstancesRequest, dict]): + The request object. A request message for + Instances.ListReferrers. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the target instance scoping + this request, or '-' if the request + should span over all instances in the + container. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.instances.pagers.ListReferrersPager: + Contains a list of instance + referrers. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListReferrersInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListReferrersInstancesRequest): + request = compute.ListReferrersInstancesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_referrers] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListReferrersPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def remove_resource_policies(self, + request: Union[compute.RemoveResourcePoliciesInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_remove_resource_policies_request_resource: compute.InstancesRemoveResourcePoliciesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Removes resource policies from an instance. + + Args: + request (Union[google.cloud.compute_v1.types.RemoveResourcePoliciesInstanceRequest, dict]): + The request object. A request message for + Instances.RemoveResourcePolicies. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances_remove_resource_policies_request_resource (google.cloud.compute_v1.types.InstancesRemoveResourcePoliciesRequest): + The body resource for this request + This corresponds to the ``instances_remove_resource_policies_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, instances_remove_resource_policies_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemoveResourcePoliciesInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemoveResourcePoliciesInstanceRequest): + request = compute.RemoveResourcePoliciesInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+        if project is not None:
+            request.project = project
+        if zone is not None:
+            request.zone = zone
+        if instance is not None:
+            request.instance = instance
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.remove_resource_policies]
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def reset(self,
+            request: Union[compute.ResetInstanceRequest, dict] = None,
+            *,
+            project: str = None,
+            zone: str = None,
+            instance: str = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> compute.Operation:
+        r"""Performs a reset on the instance. This is a hard
+        reset; the VM does not do a graceful shutdown. For more
+        information, see Resetting an instance.
+
+        Args:
+            request (Union[google.cloud.compute_v1.types.ResetInstanceRequest, dict]):
+                The request object. A request message for
+                Instances.Reset. See the method description for details.
+            project (str):
+                Project ID for this request.
+                This corresponds to the ``project`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            zone (str):
+                The name of the zone for this
+                request.
+
+                This corresponds to the ``zone`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            instance (str):
+                Name of the instance scoping this
+                request.
+
+                This corresponds to the ``instance`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ResetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ResetInstanceRequest): + request = compute.ResetInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.reset] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def send_diagnostic_interrupt(self, + request: Union[compute.SendDiagnosticInterruptInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SendDiagnosticInterruptInstanceResponse: + r"""Sends diagnostic interrupt to the instance. + + Args: + request (Union[google.cloud.compute_v1.types.SendDiagnosticInterruptInstanceRequest, dict]): + The request object. A request message for + Instances.SendDiagnosticInterrupt. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.SendDiagnosticInterruptInstanceResponse: + A response message for + Instances.SendDiagnosticInterrupt. See + the method description for details. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SendDiagnosticInterruptInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SendDiagnosticInterruptInstanceRequest): + request = compute.SendDiagnosticInterruptInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.send_diagnostic_interrupt] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_deletion_protection(self, + request: Union[compute.SetDeletionProtectionInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets deletion protection on the instance. 
+ + Args: + request (Union[google.cloud.compute_v1.types.SetDeletionProtectionInstanceRequest, dict]): + The request object. A request message for + Instances.SetDeletionProtection. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetDeletionProtectionInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetDeletionProtectionInstanceRequest): + request = compute.SetDeletionProtectionInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_deletion_protection] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_disk_auto_delete(self, + request: Union[compute.SetDiskAutoDeleteInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + auto_delete: bool = None, + device_name: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the auto-delete flag for a disk attached to an + instance. + + Args: + request (Union[google.cloud.compute_v1.types.SetDiskAutoDeleteInstanceRequest, dict]): + The request object. A request message for + Instances.SetDiskAutoDelete. See the method description + for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + auto_delete (bool): + Whether to auto-delete the disk when + the instance is deleted. + + This corresponds to the ``auto_delete`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + device_name (str): + The device name of the disk to + modify. Make a get() request on the + instance to view currently attached + disks and device names. + + This corresponds to the ``device_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. 
For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, auto_delete, device_name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetDiskAutoDeleteInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetDiskAutoDeleteInstanceRequest): + request = compute.SetDiskAutoDeleteInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if auto_delete is not None: + request.auto_delete = auto_delete + if device_name is not None: + request.device_name = device_name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_disk_auto_delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + zone_set_policy_request_resource: compute.ZoneSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyInstanceRequest, dict]): + The request object. A request message for + Instances.SetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + This corresponds to the ``zone_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. 
A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, zone_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetIamPolicyInstanceRequest): + request = compute.SetIamPolicyInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if zone_set_policy_request_resource is not None: + request.zone_set_policy_request_resource = zone_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_set_labels_request_resource: compute.InstancesSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets labels on an instance. To learn more about + labels, read the Labeling Resources documentation. 
+ + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsInstanceRequest, dict]): + The request object. A request message for + Instances.SetLabels. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances_set_labels_request_resource (google.cloud.compute_v1.types.InstancesSetLabelsRequest): + The body resource for this request + This corresponds to the ``instances_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. 
- For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, instances_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetLabelsInstanceRequest): + request = compute.SetLabelsInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instances_set_labels_request_resource is not None: + request.instances_set_labels_request_resource = instances_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_machine_resources(self, + request: Union[compute.SetMachineResourcesInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_set_machine_resources_request_resource: compute.InstancesSetMachineResourcesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the number and/or type of accelerator for a + stopped instance to the values specified in the request. + + Args: + request (Union[google.cloud.compute_v1.types.SetMachineResourcesInstanceRequest, dict]): + The request object. A request message for + Instances.SetMachineResources. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances_set_machine_resources_request_resource (google.cloud.compute_v1.types.InstancesSetMachineResourcesRequest): + The body resource for this request + This corresponds to the ``instances_set_machine_resources_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, instances_set_machine_resources_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetMachineResourcesInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetMachineResourcesInstanceRequest): + request = compute.SetMachineResourcesInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instances_set_machine_resources_request_resource is not None: + request.instances_set_machine_resources_request_resource = instances_set_machine_resources_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_machine_resources] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_machine_type(self, + request: Union[compute.SetMachineTypeInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_set_machine_type_request_resource: compute.InstancesSetMachineTypeRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the machine type for a stopped instance to + the machine type specified in the request. + + Args: + request (Union[google.cloud.compute_v1.types.SetMachineTypeInstanceRequest, dict]): + The request object. A request message for + Instances.SetMachineType. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instances_set_machine_type_request_resource (google.cloud.compute_v1.types.InstancesSetMachineTypeRequest): + The body resource for this request + This corresponds to the ``instances_set_machine_type_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, instances_set_machine_type_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetMachineTypeInstanceRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetMachineTypeInstanceRequest): + request = compute.SetMachineTypeInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instances_set_machine_type_request_resource is not None: + request.instances_set_machine_type_request_resource = instances_set_machine_type_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_machine_type] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_metadata(self, + request: Union[compute.SetMetadataInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + metadata_resource: compute.Metadata = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets metadata for the specified instance to the data + included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.SetMetadataInstanceRequest, dict]): + The request object. A request message for + Instances.SetMetadata. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_resource (google.cloud.compute_v1.types.Metadata): + The body resource for this request + This corresponds to the ``metadata_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, metadata_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetMetadataInstanceRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetMetadataInstanceRequest): + request = compute.SetMetadataInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if metadata_resource is not None: + request.metadata_resource = metadata_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_metadata] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_min_cpu_platform(self, + request: Union[compute.SetMinCpuPlatformInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_set_min_cpu_platform_request_resource: compute.InstancesSetMinCpuPlatformRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the minimum CPU platform that this instance + should use. This method can only be called on a stopped + instance. For more information, read Specifying a + Minimum CPU Platform. + + Args: + request (Union[google.cloud.compute_v1.types.SetMinCpuPlatformInstanceRequest, dict]): + The request object. A request message for + Instances.SetMinCpuPlatform. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances_set_min_cpu_platform_request_resource (google.cloud.compute_v1.types.InstancesSetMinCpuPlatformRequest): + The body resource for this request + This corresponds to the ``instances_set_min_cpu_platform_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance, instances_set_min_cpu_platform_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetMinCpuPlatformInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetMinCpuPlatformInstanceRequest): + request = compute.SetMinCpuPlatformInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instances_set_min_cpu_platform_request_resource is not None: + request.instances_set_min_cpu_platform_request_resource = instances_set_min_cpu_platform_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_min_cpu_platform] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_scheduling(self, + request: Union[compute.SetSchedulingInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + scheduling_resource: compute.Scheduling = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets an instance's scheduling options. You can only call this + method on a stopped instance, that is, a VM instance that is in + a ``TERMINATED`` state. See Instance Life Cycle for more + information on the possible instance states. 
+ + Args: + request (Union[google.cloud.compute_v1.types.SetSchedulingInstanceRequest, dict]): + The request object. A request message for + Instances.SetScheduling. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + scheduling_resource (google.cloud.compute_v1.types.Scheduling): + The body resource for this request + This corresponds to the ``scheduling_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. 
For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, scheduling_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetSchedulingInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetSchedulingInstanceRequest): + request = compute.SetSchedulingInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if scheduling_resource is not None: + request.scheduling_resource = scheduling_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_scheduling] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_service_account(self, + request: Union[compute.SetServiceAccountInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_set_service_account_request_resource: compute.InstancesSetServiceAccountRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the service account on the instance. 
For more + information, read Changing the service account and + access scopes for an instance. + + Args: + request (Union[google.cloud.compute_v1.types.SetServiceAccountInstanceRequest, dict]): + The request object. A request message for + Instances.SetServiceAccount. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance resource to + update. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances_set_service_account_request_resource (google.cloud.compute_v1.types.InstancesSetServiceAccountRequest): + The body resource for this request + This corresponds to the ``instances_set_service_account_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, instances_set_service_account_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetServiceAccountInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetServiceAccountInstanceRequest): + request = compute.SetServiceAccountInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instances_set_service_account_request_resource is not None: + request.instances_set_service_account_request_resource = instances_set_service_account_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_service_account] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_shielded_instance_integrity_policy(self, + request: Union[compute.SetShieldedInstanceIntegrityPolicyInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + shielded_instance_integrity_policy_resource: compute.ShieldedInstanceIntegrityPolicy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the Shielded Instance integrity policy for an + instance. You can only use this method on a running + instance. This method supports PATCH semantics and uses + the JSON merge patch format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.SetShieldedInstanceIntegrityPolicyInstanceRequest, dict]): + The request object. A request message for + Instances.SetShieldedInstanceIntegrityPolicy. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name or id of the instance scoping + this request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + shielded_instance_integrity_policy_resource (google.cloud.compute_v1.types.ShieldedInstanceIntegrityPolicy): + The body resource for this request + This corresponds to the ``shielded_instance_integrity_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, shielded_instance_integrity_policy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetShieldedInstanceIntegrityPolicyInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetShieldedInstanceIntegrityPolicyInstanceRequest): + request = compute.SetShieldedInstanceIntegrityPolicyInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if shielded_instance_integrity_policy_resource is not None: + request.shielded_instance_integrity_policy_resource = shielded_instance_integrity_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_shielded_instance_integrity_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_tags(self, + request: Union[compute.SetTagsInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + tags_resource: compute.Tags = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets network tags for the specified instance to the + data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.SetTagsInstanceRequest, dict]): + The request object. A request message for + Instances.SetTags. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ tags_resource (google.cloud.compute_v1.types.Tags): + The body resource for this request + This corresponds to the ``tags_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, tags_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetTagsInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetTagsInstanceRequest): + request = compute.SetTagsInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if tags_resource is not None: + request.tags_resource = tags_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_tags] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def simulate_maintenance_event(self, + request: Union[compute.SimulateMaintenanceEventInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Simulates a maintenance event on the instance. + + Args: + request (Union[google.cloud.compute_v1.types.SimulateMaintenanceEventInstanceRequest, dict]): + The request object. A request message for + Instances.SimulateMaintenanceEvent. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SimulateMaintenanceEventInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SimulateMaintenanceEventInstanceRequest): + request = compute.SimulateMaintenanceEventInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.simulate_maintenance_event] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def start(self, + request: Union[compute.StartInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Starts an instance that was stopped using the + instances().stop method. For more information, see + Restart an instance. + + Args: + request (Union[google.cloud.compute_v1.types.StartInstanceRequest, dict]): + The request object. A request message for + Instances.Start. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance resource to + start. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.StartInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.StartInstanceRequest): + request = compute.StartInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.start] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def start_with_encryption_key(self, + request: Union[compute.StartWithEncryptionKeyInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instances_start_with_encryption_key_request_resource: compute.InstancesStartWithEncryptionKeyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Starts an instance that was stopped using the + instances().stop method. For more information, see + Restart an instance. + + Args: + request (Union[google.cloud.compute_v1.types.StartWithEncryptionKeyInstanceRequest, dict]): + The request object. A request message for + Instances.StartWithEncryptionKey. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance resource to + start. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instances_start_with_encryption_key_request_resource (google.cloud.compute_v1.types.InstancesStartWithEncryptionKeyRequest): + The body resource for this request + This corresponds to the ``instances_start_with_encryption_key_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, instances_start_with_encryption_key_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.StartWithEncryptionKeyInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.StartWithEncryptionKeyInstanceRequest): + request = compute.StartWithEncryptionKeyInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instances_start_with_encryption_key_request_resource is not None: + request.instances_start_with_encryption_key_request_resource = instances_start_with_encryption_key_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.start_with_encryption_key] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def stop(self, + request: Union[compute.StopInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Stops a running instance, shutting it down cleanly, + and allows you to restart the instance at a later time. + Stopped instances do not incur VM usage charges while + they are stopped. However, resources that the VM is + using, such as persistent disks and static IP addresses, + will continue to be charged until they are deleted. For + more information, see Stopping an instance. + + Args: + request (Union[google.cloud.compute_v1.types.StopInstanceRequest, dict]): + The request object. A request message for + Instances.Stop. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance resource to + stop. 
+ + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.StopInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.StopInstanceRequest): + request = compute.StopInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.stop] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsInstanceRequest, dict]): + The request object. A request message for + Instances.TestIamPermissions. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsInstanceRequest): + request = compute.TestIamPermissionsInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + instance_resource: compute.Instance = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates an instance only if the necessary resources + are available. This method can update only a specific + set of instance properties. See Updating a running + instance for a list of updatable instance properties. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateInstanceRequest, dict]): + The request object. A request message for + Instances.Update. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance resource to + update. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_resource (google.cloud.compute_v1.types.Instance): + The body resource for this request + This corresponds to the ``instance_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, instance_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateInstanceRequest): + request = compute.UpdateInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if instance_resource is not None: + request.instance_resource = instance_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_access_config(self, + request: Union[compute.UpdateAccessConfigInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + network_interface: str = None, + access_config_resource: compute.AccessConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified access config from an + instance's network interface with the data included in + the request. This method supports PATCH semantics and + uses the JSON merge patch format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateAccessConfigInstanceRequest, dict]): + The request object. A request message for + Instances.UpdateAccessConfig. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_interface (str): + The name of the network interface + where the access config is attached. + + This corresponds to the ``network_interface`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ access_config_resource (google.cloud.compute_v1.types.AccessConfig): + The body resource for this request + This corresponds to the ``access_config_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, network_interface, access_config_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateAccessConfigInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.UpdateAccessConfigInstanceRequest): + request = compute.UpdateAccessConfigInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if network_interface is not None: + request.network_interface = network_interface + if access_config_resource is not None: + request.access_config_resource = access_config_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_access_config] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_display_device(self, + request: Union[compute.UpdateDisplayDeviceInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + display_device_resource: compute.DisplayDevice = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the Display config for a VM instance. You can + only use this method on a stopped VM instance. This + method supports PATCH semantics and uses the JSON merge + patch format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateDisplayDeviceInstanceRequest, dict]): + The request object. A request message for + Instances.UpdateDisplayDevice. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. 
+ + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + Name of the instance scoping this + request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + display_device_resource (google.cloud.compute_v1.types.DisplayDevice): + The body resource for this request + This corresponds to the ``display_device_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance, display_device_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateDisplayDeviceInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateDisplayDeviceInstanceRequest): + request = compute.UpdateDisplayDeviceInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if display_device_resource is not None: + request.display_device_resource = display_device_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_display_device] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_network_interface(self, + request: Union[compute.UpdateNetworkInterfaceInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + network_interface: str = None, + network_interface_resource: compute.NetworkInterface = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates an instance's network interface. This method + can only update an interface's alias IP range and + attached network. See Modifying alias IP ranges for an + existing instance for instructions on changing alias IP + ranges. 
See Migrating a VM between networks for + instructions on migrating an interface. This method + follows PATCH semantics. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateNetworkInterfaceInstanceRequest, dict]): + The request object. A request message for + Instances.UpdateNetworkInterface. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (str): + The instance name for this request. + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_interface (str): + The name of the network interface to + update. + + This corresponds to the ``network_interface`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_interface_resource (google.cloud.compute_v1.types.NetworkInterface): + The body resource for this request + This corresponds to the ``network_interface_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, instance, network_interface, network_interface_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateNetworkInterfaceInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateNetworkInterfaceInstanceRequest): + request = compute.UpdateNetworkInterfaceInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if network_interface is not None: + request.network_interface = network_interface + if network_interface_resource is not None: + request.network_interface_resource = network_interface_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_network_interface] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_shielded_instance_config(self, + request: Union[compute.UpdateShieldedInstanceConfigInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + instance: str = None, + shielded_instance_config_resource: compute.ShieldedInstanceConfig = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the Shielded Instance config for an instance. + You can only use this method on a stopped instance. This + method supports PATCH semantics and uses the JSON merge + patch format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateShieldedInstanceConfigInstanceRequest, dict]): + The request object. A request message for + Instances.UpdateShieldedInstanceConfig. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instance (str): + Name or id of the instance scoping + this request. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + shielded_instance_config_resource (google.cloud.compute_v1.types.ShieldedInstanceConfig): + The body resource for this request + This corresponds to the ``shielded_instance_config_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, instance, shielded_instance_config_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateShieldedInstanceConfigInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateShieldedInstanceConfigInstanceRequest): + request = compute.UpdateShieldedInstanceConfigInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if instance is not None: + request.instance = instance + if shielded_instance_config_resource is not None: + request.shielded_instance_config_resource = shielded_instance_config_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_shielded_instance_config] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "InstancesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/pagers.py new file mode 100644 index 000000000..69a820d38 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/pagers.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.InstanceAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceAggregatedList], + request: compute.AggregatedListInstancesRequest, + response: compute.InstanceAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListInstancesRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListInstancesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.InstancesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.InstancesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceList], + request: compute.ListInstancesRequest, + response: compute.InstanceList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInstancesRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListInstancesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Instance]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListReferrersPager: + """A pager for iterating through ``list_referrers`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InstanceListReferrers` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListReferrers`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InstanceListReferrers` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InstanceListReferrers], + request: compute.ListReferrersInstancesRequest, + response: compute.InstanceListReferrers, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListReferrersInstancesRequest): + The initial request object. + response (google.cloud.compute_v1.types.InstanceListReferrers): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListReferrersInstancesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InstanceListReferrers]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Reference]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/__init__.py new file mode 100644 index 000000000..5c34ee354 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import InstancesTransport +from .rest import InstancesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[InstancesTransport]] +_transport_registry['rest'] = InstancesRestTransport + +__all__ = ( + 'InstancesTransport', + 'InstancesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/base.py new file mode 100644 index 000000000..ae7760b81 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/base.py @@ -0,0 +1,735 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class InstancesTransport(abc.ABC): + """Abstract transport class for Instances.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.add_access_config: gapic_v1.method.wrap_method( + self.add_access_config, + default_timeout=None, + client_info=client_info, + ), + self.add_resource_policies: gapic_v1.method.wrap_method( + self.add_resource_policies, + default_timeout=None, + client_info=client_info, + ), + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.attach_disk: gapic_v1.method.wrap_method( + self.attach_disk, + default_timeout=None, + client_info=client_info, + ), + self.bulk_insert: gapic_v1.method.wrap_method( + self.bulk_insert, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.delete_access_config: gapic_v1.method.wrap_method( + self.delete_access_config, + default_timeout=None, + client_info=client_info, + ), + self.detach_disk: gapic_v1.method.wrap_method( + self.detach_disk, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_effective_firewalls: gapic_v1.method.wrap_method( + self.get_effective_firewalls, + default_timeout=None, + client_info=client_info, + ), + self.get_guest_attributes: gapic_v1.method.wrap_method( + self.get_guest_attributes, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.get_screenshot: gapic_v1.method.wrap_method( + self.get_screenshot, + default_timeout=None, + client_info=client_info, + ), + self.get_serial_port_output: gapic_v1.method.wrap_method( + self.get_serial_port_output, + default_timeout=None, + client_info=client_info, + ), + self.get_shielded_instance_identity: gapic_v1.method.wrap_method( + self.get_shielded_instance_identity, + 
default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_referrers: gapic_v1.method.wrap_method( + self.list_referrers, + default_timeout=None, + client_info=client_info, + ), + self.remove_resource_policies: gapic_v1.method.wrap_method( + self.remove_resource_policies, + default_timeout=None, + client_info=client_info, + ), + self.reset: gapic_v1.method.wrap_method( + self.reset, + default_timeout=None, + client_info=client_info, + ), + self.send_diagnostic_interrupt: gapic_v1.method.wrap_method( + self.send_diagnostic_interrupt, + default_timeout=None, + client_info=client_info, + ), + self.set_deletion_protection: gapic_v1.method.wrap_method( + self.set_deletion_protection, + default_timeout=None, + client_info=client_info, + ), + self.set_disk_auto_delete: gapic_v1.method.wrap_method( + self.set_disk_auto_delete, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, + default_timeout=None, + client_info=client_info, + ), + self.set_machine_resources: gapic_v1.method.wrap_method( + self.set_machine_resources, + default_timeout=None, + client_info=client_info, + ), + self.set_machine_type: gapic_v1.method.wrap_method( + self.set_machine_type, + default_timeout=None, + client_info=client_info, + ), + self.set_metadata: gapic_v1.method.wrap_method( + self.set_metadata, + default_timeout=None, + client_info=client_info, + ), + self.set_min_cpu_platform: gapic_v1.method.wrap_method( + self.set_min_cpu_platform, + default_timeout=None, + client_info=client_info, + ), + self.set_scheduling: gapic_v1.method.wrap_method( + self.set_scheduling, 
+ default_timeout=None, + client_info=client_info, + ), + self.set_service_account: gapic_v1.method.wrap_method( + self.set_service_account, + default_timeout=None, + client_info=client_info, + ), + self.set_shielded_instance_integrity_policy: gapic_v1.method.wrap_method( + self.set_shielded_instance_integrity_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_tags: gapic_v1.method.wrap_method( + self.set_tags, + default_timeout=None, + client_info=client_info, + ), + self.simulate_maintenance_event: gapic_v1.method.wrap_method( + self.simulate_maintenance_event, + default_timeout=None, + client_info=client_info, + ), + self.start: gapic_v1.method.wrap_method( + self.start, + default_timeout=None, + client_info=client_info, + ), + self.start_with_encryption_key: gapic_v1.method.wrap_method( + self.start_with_encryption_key, + default_timeout=None, + client_info=client_info, + ), + self.stop: gapic_v1.method.wrap_method( + self.stop, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + self.update_access_config: gapic_v1.method.wrap_method( + self.update_access_config, + default_timeout=None, + client_info=client_info, + ), + self.update_display_device: gapic_v1.method.wrap_method( + self.update_display_device, + default_timeout=None, + client_info=client_info, + ), + self.update_network_interface: gapic_v1.method.wrap_method( + self.update_network_interface, + default_timeout=None, + client_info=client_info, + ), + self.update_shielded_instance_config: gapic_v1.method.wrap_method( + self.update_shielded_instance_config, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. 
warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def add_access_config(self) -> Callable[ + [compute.AddAccessConfigInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def add_resource_policies(self) -> Callable[ + [compute.AddResourcePoliciesInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListInstancesRequest], + Union[ + compute.InstanceAggregatedList, + Awaitable[compute.InstanceAggregatedList] + ]]: + raise NotImplementedError() + + @property + def attach_disk(self) -> Callable[ + [compute.AttachDiskInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def bulk_insert(self) -> Callable[ + [compute.BulkInsertInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_access_config(self) -> Callable[ + [compute.DeleteAccessConfigInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def detach_disk(self) -> Callable[ + [compute.DetachDiskInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetInstanceRequest], + Union[ + compute.Instance, + Awaitable[compute.Instance] + ]]: + raise NotImplementedError() + + @property + def get_effective_firewalls(self) -> Callable[ + 
[compute.GetEffectiveFirewallsInstanceRequest], + Union[ + compute.InstancesGetEffectiveFirewallsResponse, + Awaitable[compute.InstancesGetEffectiveFirewallsResponse] + ]]: + raise NotImplementedError() + + @property + def get_guest_attributes(self) -> Callable[ + [compute.GetGuestAttributesInstanceRequest], + Union[ + compute.GuestAttributes, + Awaitable[compute.GuestAttributes] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyInstanceRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def get_screenshot(self) -> Callable[ + [compute.GetScreenshotInstanceRequest], + Union[ + compute.Screenshot, + Awaitable[compute.Screenshot] + ]]: + raise NotImplementedError() + + @property + def get_serial_port_output(self) -> Callable[ + [compute.GetSerialPortOutputInstanceRequest], + Union[ + compute.SerialPortOutput, + Awaitable[compute.SerialPortOutput] + ]]: + raise NotImplementedError() + + @property + def get_shielded_instance_identity(self) -> Callable[ + [compute.GetShieldedInstanceIdentityInstanceRequest], + Union[ + compute.ShieldedInstanceIdentity, + Awaitable[compute.ShieldedInstanceIdentity] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListInstancesRequest], + Union[ + compute.InstanceList, + Awaitable[compute.InstanceList] + ]]: + raise NotImplementedError() + + @property + def list_referrers(self) -> Callable[ + [compute.ListReferrersInstancesRequest], + Union[ + compute.InstanceListReferrers, + Awaitable[compute.InstanceListReferrers] + ]]: + raise NotImplementedError() + + @property + def remove_resource_policies(self) -> Callable[ + [compute.RemoveResourcePoliciesInstanceRequest], + Union[ + compute.Operation, + 
Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def reset(self) -> Callable[ + [compute.ResetInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def send_diagnostic_interrupt(self) -> Callable[ + [compute.SendDiagnosticInterruptInstanceRequest], + Union[ + compute.SendDiagnosticInterruptInstanceResponse, + Awaitable[compute.SendDiagnosticInterruptInstanceResponse] + ]]: + raise NotImplementedError() + + @property + def set_deletion_protection(self) -> Callable[ + [compute.SetDeletionProtectionInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_disk_auto_delete(self) -> Callable[ + [compute.SetDiskAutoDeleteInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyInstanceRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [compute.SetLabelsInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_machine_resources(self) -> Callable[ + [compute.SetMachineResourcesInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_machine_type(self) -> Callable[ + [compute.SetMachineTypeInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_metadata(self) -> Callable[ + [compute.SetMetadataInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_min_cpu_platform(self) -> Callable[ + [compute.SetMinCpuPlatformInstanceRequest], + 
Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_scheduling(self) -> Callable[ + [compute.SetSchedulingInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_service_account(self) -> Callable[ + [compute.SetServiceAccountInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_shielded_instance_integrity_policy(self) -> Callable[ + [compute.SetShieldedInstanceIntegrityPolicyInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_tags(self) -> Callable[ + [compute.SetTagsInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def simulate_maintenance_event(self) -> Callable[ + [compute.SimulateMaintenanceEventInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def start(self) -> Callable[ + [compute.StartInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def start_with_encryption_key(self) -> Callable[ + [compute.StartWithEncryptionKeyInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def stop(self) -> Callable[ + [compute.StopInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsInstanceRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + 
[compute.UpdateInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update_access_config(self) -> Callable[ + [compute.UpdateAccessConfigInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update_display_device(self) -> Callable[ + [compute.UpdateDisplayDeviceInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update_network_interface(self) -> Callable[ + [compute.UpdateNetworkInterfaceInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update_shielded_instance_config(self) -> Callable[ + [compute.UpdateShieldedInstanceConfigInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'InstancesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/rest.py new file mode 100644 index 000000000..1d89097b6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/instances/transports/rest.py @@ -0,0 +1,5104 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import 
warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import InstancesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class InstancesRestTransport(InstancesTransport): + """REST backend transport for Instances. + + The Instances API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_access_config(self, + request: compute.AddAccessConfigInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add access config method over HTTP. + + Args: + request (~.compute.AddAccessConfigInstanceRequest): + The request object. A request message for + Instances.AddAccessConfig. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig', + 'body': 'access_config_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "network_interface", + "networkInterface" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.AddAccessConfigInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.AccessConfig.to_json( + compute.AccessConfig( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddAccessConfigInstanceRequest.to_json( + compute.AddAccessConfigInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _add_resource_policies(self, + request: compute.AddResourcePoliciesInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add resource policies method over HTTP. + + Args: + request (~.compute.AddResourcePoliciesInstanceRequest): + The request object. A request message for + Instances.AddResourcePolicies. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies', + 'body': 'instances_add_resource_policies_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.AddResourcePoliciesInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesAddResourcePoliciesRequest.to_json( + compute.InstancesAddResourcePoliciesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddResourcePoliciesInstanceRequest.to_json( + compute.AddResourcePoliciesInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _aggregated_list(self, + request: compute.AggregatedListInstancesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListInstancesRequest): + The request object. A request message for + Instances.AggregatedList. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.InstanceAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/instances', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListInstancesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListInstancesRequest.to_json( + compute.AggregatedListInstancesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _attach_disk(self, + request: compute.AttachDiskInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the attach disk method over HTTP. + + Args: + request (~.compute.AttachDiskInstanceRequest): + The request object. A request message for + Instances.AttachDisk. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/attachDisk', + 'body': 'attached_disk_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.AttachDiskInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.AttachedDisk.to_json( + compute.AttachedDisk( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AttachDiskInstanceRequest.to_json( + compute.AttachDiskInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _bulk_insert(self, + request: compute.BulkInsertInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the bulk insert method over HTTP. + + Args: + request (~.compute.BulkInsertInstanceRequest): + The request object. A request message for + Instances.BulkInsert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/bulkInsert', + 'body': 'bulk_insert_instance_resource_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.BulkInsertInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.BulkInsertInstanceResource.to_json( + compute.BulkInsertInstanceResource( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.BulkInsertInstanceRequest.to_json( + compute.BulkInsertInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteInstanceRequest): + The request object. A request message for + Instances.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteInstanceRequest.to_json( + compute.DeleteInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete_access_config(self, + request: compute.DeleteAccessConfigInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete access config method over HTTP. + + Args: + request (~.compute.DeleteAccessConfigInstanceRequest): + The request object. A request message for + Instances.DeleteAccessConfig. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "access_config", + "accessConfig" + ), + ( + "instance", + "instance" + ), + ( + "network_interface", + "networkInterface" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteAccessConfigInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteAccessConfigInstanceRequest.to_json( + compute.DeleteAccessConfigInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _detach_disk(self, + request: compute.DetachDiskInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the detach disk method over HTTP. + + Args: + request (~.compute.DetachDiskInstanceRequest): + The request object. A request message for + Instances.DetachDisk. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/detachDisk', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "device_name", + "deviceName" + ), + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DetachDiskInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DetachDiskInstanceRequest.to_json( + compute.DetachDiskInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Instance: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetInstanceRequest): + The request object. A request message for Instances.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Instance: + Represents an Instance resource. An + instance is a virtual machine that is + hosted on Google Cloud Platform. For + more information, read Virtual Machine + Instances. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetInstanceRequest.to_json( + compute.GetInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Instance.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_effective_firewalls(self, + request: compute.GetEffectiveFirewallsInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstancesGetEffectiveFirewallsResponse: + r"""Call the get effective firewalls method over HTTP. + + Args: + request (~.compute.GetEffectiveFirewallsInstanceRequest): + The request object. A request message for + Instances.GetEffectiveFirewalls. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.InstancesGetEffectiveFirewallsResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "network_interface", + "networkInterface" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetEffectiveFirewallsInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetEffectiveFirewallsInstanceRequest.to_json( + compute.GetEffectiveFirewallsInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstancesGetEffectiveFirewallsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_guest_attributes(self, + request: compute.GetGuestAttributesInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.GuestAttributes: + r"""Call the get guest attributes method over HTTP. + + Args: + request (~.compute.GetGuestAttributesInstanceRequest): + The request object. A request message for + Instances.GetGuestAttributes. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.GuestAttributes: + A guest attributes entry. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetGuestAttributesInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetGuestAttributesInstanceRequest.to_json( + compute.GetGuestAttributesInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.GuestAttributes.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyInstanceRequest): + The request object. A request message for + Instances.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation <https://cloud.google.com/iam/docs/>`__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetIamPolicyInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyInstanceRequest.to_json( + compute.GetIamPolicyInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_screenshot(self, + request: compute.GetScreenshotInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Screenshot: + r"""Call the get screenshot method over HTTP. + + Args: + request (~.compute.GetScreenshotInstanceRequest): + The request object. A request message for + Instances.GetScreenshot. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Screenshot: + An instance's screenshot. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/screenshot', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetScreenshotInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetScreenshotInstanceRequest.to_json( + compute.GetScreenshotInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Screenshot.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_serial_port_output(self, + request: compute.GetSerialPortOutputInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SerialPortOutput: + r"""Call the get serial port output method over HTTP. + + Args: + request (~.compute.GetSerialPortOutputInstanceRequest): + The request object. A request message for + Instances.GetSerialPortOutput. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SerialPortOutput: + An instance serial console output. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/serialPort', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetSerialPortOutputInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetSerialPortOutputInstanceRequest.to_json( + compute.GetSerialPortOutputInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SerialPortOutput.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_shielded_instance_identity(self, + request: compute.GetShieldedInstanceIdentityInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ShieldedInstanceIdentity: + r"""Call the get shielded instance + identity method over HTTP. + + Args: + request (~.compute.GetShieldedInstanceIdentityInstanceRequest): + The request object. A request message for + Instances.GetShieldedInstanceIdentity. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ShieldedInstanceIdentity: + A Shielded Instance Identity. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetShieldedInstanceIdentityInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetShieldedInstanceIdentityInstanceRequest.to_json( + compute.GetShieldedInstanceIdentityInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ShieldedInstanceIdentity.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertInstanceRequest): + The request object. A request message for + Instances.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances', + 'body': 'instance_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Instance.to_json( + compute.Instance( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertInstanceRequest.to_json( + compute.InsertInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListInstancesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListInstancesRequest): + The request object. A request message for Instances.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InstanceList: + Contains a list of instances. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListInstancesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListInstancesRequest.to_json( + compute.ListInstancesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_referrers(self, + request: compute.ListReferrersInstancesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceListReferrers: + r"""Call the list referrers method over HTTP. + + Args: + request (~.compute.ListReferrersInstancesRequest): + The request object. A request message for + Instances.ListReferrers. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InstanceListReferrers: + Contains a list of instance + referrers. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/referrers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListReferrersInstancesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListReferrersInstancesRequest.to_json( + compute.ListReferrersInstancesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceListReferrers.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_resource_policies(self, + request: compute.RemoveResourcePoliciesInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove resource policies method over HTTP. + + Args: + request (~.compute.RemoveResourcePoliciesInstanceRequest): + The request object. A request message for + Instances.RemoveResourcePolicies. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies', + 'body': 'instances_remove_resource_policies_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.RemoveResourcePoliciesInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesRemoveResourcePoliciesRequest.to_json( + compute.InstancesRemoveResourcePoliciesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveResourcePoliciesInstanceRequest.to_json( + compute.RemoveResourcePoliciesInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _reset(self, + request: compute.ResetInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the reset method over HTTP. + + Args: + request (~.compute.ResetInstanceRequest): + The request object. A request message for + Instances.Reset. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/reset', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ResetInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ResetInstanceRequest.to_json( + compute.ResetInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _send_diagnostic_interrupt(self, + request: compute.SendDiagnosticInterruptInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SendDiagnosticInterruptInstanceResponse: + r"""Call the send diagnostic interrupt method over HTTP. + + Args: + request (~.compute.SendDiagnosticInterruptInstanceRequest): + The request object. A request message for + Instances.SendDiagnosticInterrupt. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SendDiagnosticInterruptInstanceResponse: + A response message for + Instances.SendDiagnosticInterrupt. See + the method description for details. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SendDiagnosticInterruptInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SendDiagnosticInterruptInstanceRequest.to_json( + compute.SendDiagnosticInterruptInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SendDiagnosticInterruptInstanceResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_deletion_protection(self, + request: compute.SetDeletionProtectionInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set deletion protection method over HTTP. + + Args: + request (~.compute.SetDeletionProtectionInstanceRequest): + The request object. A request message for + Instances.SetDeletionProtection. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetDeletionProtectionInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetDeletionProtectionInstanceRequest.to_json( + compute.SetDeletionProtectionInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_disk_auto_delete(self, + request: compute.SetDiskAutoDeleteInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set disk auto delete method over HTTP. + + Args: + request (~.compute.SetDiskAutoDeleteInstanceRequest): + The request object. A request message for + Instances.SetDiskAutoDelete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "auto_delete", + "autoDelete" + ), + ( + "device_name", + "deviceName" + ), + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetDiskAutoDeleteInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetDiskAutoDeleteInstanceRequest.to_json( + compute.SetDiskAutoDeleteInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyInstanceRequest): + The request object. A request message for + Instances.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role":
+ "roles/resourcemanager.organizationAdmin", "members": [
+ "user:mike@example.com", "group:admins@example.com",
+ "domain:google.com",
+ "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+ ] }, { "role":
+ "roles/resourcemanager.organizationViewer", "members": [
+ "user:eve@example.com" ], "condition": { "title":
+ "expirable access", "description": "Does not grant
+ access after Sep 2020", "expression": "request.time <
+ timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
+ "BwWWja0YfJA=", "version": 3 } **YAML example:**
+ bindings: - members: - user:mike@example.com -
+ group:admins@example.com - domain:google.com -
+ serviceAccount:my-project-id@appspot.gserviceaccount.com
+ role: roles/resourcemanager.organizationAdmin - members:
+ - user:eve@example.com role:
+ roles/resourcemanager.organizationViewer condition:
+ title: expirable access description: Does not grant
+ access after Sep 2020 expression: request.time <
+ timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
+ version: 3 For a description of IAM and its features,
+ see the `IAM
+ documentation <https://cloud.google.com/iam/docs/>`__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy', + 'body': 'zone_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetIamPolicyInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ZoneSetPolicyRequest.to_json( + compute.ZoneSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyInstanceRequest.to_json( + compute.SetIamPolicyInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsInstanceRequest): + The request object. A request message for + Instances.SetLabels. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setLabels', + 'body': 'instances_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetLabelsInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesSetLabelsRequest.to_json( + compute.InstancesSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsInstanceRequest.to_json( + compute.SetLabelsInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_machine_resources(self, + request: compute.SetMachineResourcesInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set machine resources method over HTTP. + + Args: + request (~.compute.SetMachineResourcesInstanceRequest): + The request object. A request message for + Instances.SetMachineResources. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMachineResources', + 'body': 'instances_set_machine_resources_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetMachineResourcesInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesSetMachineResourcesRequest.to_json( + compute.InstancesSetMachineResourcesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetMachineResourcesInstanceRequest.to_json( + compute.SetMachineResourcesInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_machine_type(self, + request: compute.SetMachineTypeInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set machine type method over HTTP. + + Args: + request (~.compute.SetMachineTypeInstanceRequest): + The request object. A request message for + Instances.SetMachineType. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMachineType', + 'body': 'instances_set_machine_type_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetMachineTypeInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesSetMachineTypeRequest.to_json( + compute.InstancesSetMachineTypeRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetMachineTypeInstanceRequest.to_json( + compute.SetMachineTypeInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_metadata(self, + request: compute.SetMetadataInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set metadata method over HTTP. + + Args: + request (~.compute.SetMetadataInstanceRequest): + The request object. A request message for + Instances.SetMetadata. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMetadata', + 'body': 'metadata_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetMetadataInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Metadata.to_json( + compute.Metadata( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetMetadataInstanceRequest.to_json( + compute.SetMetadataInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_min_cpu_platform(self, + request: compute.SetMinCpuPlatformInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set min cpu platform method over HTTP. + + Args: + request (~.compute.SetMinCpuPlatformInstanceRequest): + The request object. A request message for + Instances.SetMinCpuPlatform. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform', + 'body': 'instances_set_min_cpu_platform_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetMinCpuPlatformInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesSetMinCpuPlatformRequest.to_json( + compute.InstancesSetMinCpuPlatformRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetMinCpuPlatformInstanceRequest.to_json( + compute.SetMinCpuPlatformInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_scheduling(self, + request: compute.SetSchedulingInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set scheduling method over HTTP. + + Args: + request (~.compute.SetSchedulingInstanceRequest): + The request object. A request message for + Instances.SetScheduling. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setScheduling', + 'body': 'scheduling_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetSchedulingInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Scheduling.to_json( + compute.Scheduling( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetSchedulingInstanceRequest.to_json( + compute.SetSchedulingInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_service_account(self, + request: compute.SetServiceAccountInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set service account method over HTTP. + + Args: + request (~.compute.SetServiceAccountInstanceRequest): + The request object. A request message for + Instances.SetServiceAccount. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount', + 'body': 'instances_set_service_account_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetServiceAccountInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesSetServiceAccountRequest.to_json( + compute.InstancesSetServiceAccountRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetServiceAccountInstanceRequest.to_json( + compute.SetServiceAccountInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_shielded_instance_integrity_policy(self, + request: compute.SetShieldedInstanceIntegrityPolicyInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set shielded instance + integrity policy method over HTTP. + + Args: + request (~.compute.SetShieldedInstanceIntegrityPolicyInstanceRequest): + The request object. A request message for + Instances.SetShieldedInstanceIntegrityPolicy. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy', + 'body': 'shielded_instance_integrity_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetShieldedInstanceIntegrityPolicyInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ShieldedInstanceIntegrityPolicy.to_json( + compute.ShieldedInstanceIntegrityPolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetShieldedInstanceIntegrityPolicyInstanceRequest.to_json( + compute.SetShieldedInstanceIntegrityPolicyInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_tags(self, + request: compute.SetTagsInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set tags method over HTTP. + + Args: + request (~.compute.SetTagsInstanceRequest): + The request object. A request message for + Instances.SetTags. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setTags', + 'body': 'tags_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetTagsInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Tags.to_json( + compute.Tags( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetTagsInstanceRequest.to_json( + compute.SetTagsInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _simulate_maintenance_event(self, + request: compute.SimulateMaintenanceEventInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the simulate maintenance + event method over HTTP. + + Args: + request (~.compute.SimulateMaintenanceEventInstanceRequest): + The request object. A request message for + Instances.SimulateMaintenanceEvent. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SimulateMaintenanceEventInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SimulateMaintenanceEventInstanceRequest.to_json( + compute.SimulateMaintenanceEventInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _start(self, + request: compute.StartInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the start method over HTTP. + + Args: + request (~.compute.StartInstanceRequest): + The request object. A request message for + Instances.Start. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/start', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.StartInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.StartInstanceRequest.to_json( + compute.StartInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _start_with_encryption_key(self, + request: compute.StartWithEncryptionKeyInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the start with encryption key method over HTTP. + + Args: + request (~.compute.StartWithEncryptionKeyInstanceRequest): + The request object. A request message for + Instances.StartWithEncryptionKey. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey', + 'body': 'instances_start_with_encryption_key_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.StartWithEncryptionKeyInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstancesStartWithEncryptionKeyRequest.to_json( + compute.InstancesStartWithEncryptionKeyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.StartWithEncryptionKeyInstanceRequest.to_json( + compute.StartWithEncryptionKeyInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _stop(self, + request: compute.StopInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the stop method over HTTP. + + Args: + request (~.compute.StopInstanceRequest): + The request object. A request message for Instances.Stop. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/stop', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.StopInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.StopInstanceRequest.to_json( + compute.StopInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsInstanceRequest): + The request object. A request message for + Instances.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.TestIamPermissionsInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsInstanceRequest.to_json( + 
compute.TestIamPermissionsInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateInstanceRequest): + The request object. A request message for + Instances.Update. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}', + 'body': 'instance_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.UpdateInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Instance.to_json( + compute.Instance( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateInstanceRequest.to_json( + compute.UpdateInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update_access_config(self, + request: compute.UpdateAccessConfigInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update access config method over HTTP. + + Args: + request (~.compute.UpdateAccessConfigInstanceRequest): + The request object. A request message for + Instances.UpdateAccessConfig. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig', + 'body': 'access_config_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "network_interface", + "networkInterface" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.UpdateAccessConfigInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.AccessConfig.to_json( + compute.AccessConfig( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateAccessConfigInstanceRequest.to_json( + compute.UpdateAccessConfigInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update_display_device(self, + request: compute.UpdateDisplayDeviceInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update display device method over HTTP. + + Args: + request (~.compute.UpdateDisplayDeviceInstanceRequest): + The request object. A request message for + Instances.UpdateDisplayDevice. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice', + 'body': 'display_device_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.UpdateDisplayDeviceInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.DisplayDevice.to_json( + compute.DisplayDevice( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateDisplayDeviceInstanceRequest.to_json( + compute.UpdateDisplayDeviceInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update_network_interface(self, + request: compute.UpdateNetworkInterfaceInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update network interface method over HTTP. + + Args: + request (~.compute.UpdateNetworkInterfaceInstanceRequest): + The request object. A request message for + Instances.UpdateNetworkInterface. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface', + 'body': 'network_interface_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "network_interface", + "networkInterface" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.UpdateNetworkInterfaceInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworkInterface.to_json( + compute.NetworkInterface( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateNetworkInterfaceInstanceRequest.to_json( + compute.UpdateNetworkInterfaceInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update_shielded_instance_config(self, + request: compute.UpdateShieldedInstanceConfigInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update shielded instance + config method over HTTP. + + Args: + request (~.compute.UpdateShieldedInstanceConfigInstanceRequest): + The request object. A request message for + Instances.UpdateShieldedInstanceConfig. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig', + 'body': 'shielded_instance_config_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance", + "instance" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.UpdateShieldedInstanceConfigInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ShieldedInstanceConfig.to_json( + compute.ShieldedInstanceConfig( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateShieldedInstanceConfigInstanceRequest.to_json( + compute.UpdateShieldedInstanceConfigInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_access_config(self) -> Callable[ + [compute.AddAccessConfigInstanceRequest], + compute.Operation]: + return self._add_access_config + @ property + def add_resource_policies(self) -> Callable[ + [compute.AddResourcePoliciesInstanceRequest], + compute.Operation]: + return self._add_resource_policies + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListInstancesRequest], + compute.InstanceAggregatedList]: + return self._aggregated_list + @ property + def attach_disk(self) -> Callable[ + [compute.AttachDiskInstanceRequest], + compute.Operation]: + return self._attach_disk + @ property + def bulk_insert(self) -> Callable[ + [compute.BulkInsertInstanceRequest], + compute.Operation]: + return self._bulk_insert + @ property + def delete(self) -> Callable[ + [compute.DeleteInstanceRequest], + compute.Operation]: + return self._delete + @ property + def delete_access_config(self) -> Callable[ + [compute.DeleteAccessConfigInstanceRequest], + compute.Operation]: + return 
self._delete_access_config + @ property + def detach_disk(self) -> Callable[ + [compute.DetachDiskInstanceRequest], + compute.Operation]: + return self._detach_disk + @ property + def get(self) -> Callable[ + [compute.GetInstanceRequest], + compute.Instance]: + return self._get + @ property + def get_effective_firewalls(self) -> Callable[ + [compute.GetEffectiveFirewallsInstanceRequest], + compute.InstancesGetEffectiveFirewallsResponse]: + return self._get_effective_firewalls + @ property + def get_guest_attributes(self) -> Callable[ + [compute.GetGuestAttributesInstanceRequest], + compute.GuestAttributes]: + return self._get_guest_attributes + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyInstanceRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def get_screenshot(self) -> Callable[ + [compute.GetScreenshotInstanceRequest], + compute.Screenshot]: + return self._get_screenshot + @ property + def get_serial_port_output(self) -> Callable[ + [compute.GetSerialPortOutputInstanceRequest], + compute.SerialPortOutput]: + return self._get_serial_port_output + @ property + def get_shielded_instance_identity(self) -> Callable[ + [compute.GetShieldedInstanceIdentityInstanceRequest], + compute.ShieldedInstanceIdentity]: + return self._get_shielded_instance_identity + @ property + def insert(self) -> Callable[ + [compute.InsertInstanceRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListInstancesRequest], + compute.InstanceList]: + return self._list + @ property + def list_referrers(self) -> Callable[ + [compute.ListReferrersInstancesRequest], + compute.InstanceListReferrers]: + return self._list_referrers + @ property + def remove_resource_policies(self) -> Callable[ + [compute.RemoveResourcePoliciesInstanceRequest], + compute.Operation]: + return self._remove_resource_policies + @ property + def reset(self) -> Callable[ + [compute.ResetInstanceRequest], + 
compute.Operation]: + return self._reset + @ property + def send_diagnostic_interrupt(self) -> Callable[ + [compute.SendDiagnosticInterruptInstanceRequest], + compute.SendDiagnosticInterruptInstanceResponse]: + return self._send_diagnostic_interrupt + @ property + def set_deletion_protection(self) -> Callable[ + [compute.SetDeletionProtectionInstanceRequest], + compute.Operation]: + return self._set_deletion_protection + @ property + def set_disk_auto_delete(self) -> Callable[ + [compute.SetDiskAutoDeleteInstanceRequest], + compute.Operation]: + return self._set_disk_auto_delete + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyInstanceRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsInstanceRequest], + compute.Operation]: + return self._set_labels + @ property + def set_machine_resources(self) -> Callable[ + [compute.SetMachineResourcesInstanceRequest], + compute.Operation]: + return self._set_machine_resources + @ property + def set_machine_type(self) -> Callable[ + [compute.SetMachineTypeInstanceRequest], + compute.Operation]: + return self._set_machine_type + @ property + def set_metadata(self) -> Callable[ + [compute.SetMetadataInstanceRequest], + compute.Operation]: + return self._set_metadata + @ property + def set_min_cpu_platform(self) -> Callable[ + [compute.SetMinCpuPlatformInstanceRequest], + compute.Operation]: + return self._set_min_cpu_platform + @ property + def set_scheduling(self) -> Callable[ + [compute.SetSchedulingInstanceRequest], + compute.Operation]: + return self._set_scheduling + @ property + def set_service_account(self) -> Callable[ + [compute.SetServiceAccountInstanceRequest], + compute.Operation]: + return self._set_service_account + @ property + def set_shielded_instance_integrity_policy(self) -> Callable[ + [compute.SetShieldedInstanceIntegrityPolicyInstanceRequest], + compute.Operation]: + return 
self._set_shielded_instance_integrity_policy + @ property + def set_tags(self) -> Callable[ + [compute.SetTagsInstanceRequest], + compute.Operation]: + return self._set_tags + @ property + def simulate_maintenance_event(self) -> Callable[ + [compute.SimulateMaintenanceEventInstanceRequest], + compute.Operation]: + return self._simulate_maintenance_event + @ property + def start(self) -> Callable[ + [compute.StartInstanceRequest], + compute.Operation]: + return self._start + @ property + def start_with_encryption_key(self) -> Callable[ + [compute.StartWithEncryptionKeyInstanceRequest], + compute.Operation]: + return self._start_with_encryption_key + @ property + def stop(self) -> Callable[ + [compute.StopInstanceRequest], + compute.Operation]: + return self._stop + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsInstanceRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + @ property + def update(self) -> Callable[ + [compute.UpdateInstanceRequest], + compute.Operation]: + return self._update + @ property + def update_access_config(self) -> Callable[ + [compute.UpdateAccessConfigInstanceRequest], + compute.Operation]: + return self._update_access_config + @ property + def update_display_device(self) -> Callable[ + [compute.UpdateDisplayDeviceInstanceRequest], + compute.Operation]: + return self._update_display_device + @ property + def update_network_interface(self) -> Callable[ + [compute.UpdateNetworkInterfaceInstanceRequest], + compute.Operation]: + return self._update_network_interface + @ property + def update_shielded_instance_config(self) -> Callable[ + [compute.UpdateShieldedInstanceConfigInstanceRequest], + compute.Operation]: + return self._update_shielded_instance_config + def close(self): + self._session.close() + + +__all__=( + 'InstancesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/__init__.py 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/__init__.py new file mode 100644 index 000000000..4f772fed0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import InterconnectAttachmentsClient + +__all__ = ( + 'InterconnectAttachmentsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/client.py new file mode 100644 index 000000000..49e0f121b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/client.py @@ -0,0 +1,912 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.interconnect_attachments import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import InterconnectAttachmentsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import InterconnectAttachmentsRestTransport + + +class InterconnectAttachmentsClientMeta(type): + """Metaclass for the InterconnectAttachments client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[InterconnectAttachmentsTransport]] + _transport_registry["rest"] = InterconnectAttachmentsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[InterconnectAttachmentsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class InterconnectAttachmentsClient(metaclass=InterconnectAttachmentsClientMeta): + """The InterconnectAttachments API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InterconnectAttachmentsClient: The constructed client. 
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
    file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        InterconnectAttachmentsClient: The constructed client.
    """
    # Load the service-account key and hand it to the regular constructor
    # as an explicit ``credentials`` keyword argument.
    kwargs["credentials"] = service_account.Credentials.from_service_account_file(
        filename
    )
    return cls(*args, **kwargs)

# Backward-compatible alias used by older client surfaces.
from_service_account_json = from_service_account_file
# NOTE(review): every "(?P.+?)" group in this section had its name stripped
# by the corrupting extraction (invalid regex syntax). Names restored from
# the canonical GAPIC generator template so groupdict() yields the keys the
# docstrings promise (billing_account, folder, organization, project,
# location).

@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
    """Returns a fully-qualified billing_account string."""
    return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
    """Parse a billing_account path into its component segments."""
    m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_folder_path(folder: str, ) -> str:
    """Returns a fully-qualified folder string."""
    return "folders/{folder}".format(folder=folder, )

@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
    """Parse a folder path into its component segments."""
    m = re.match(r"^folders/(?P<folder>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_organization_path(organization: str, ) -> str:
    """Returns a fully-qualified organization string."""
    return "organizations/{organization}".format(organization=organization, )

@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
    """Parse a organization path into its component segments."""
    m = re.match(r"^organizations/(?P<organization>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_project_path(project: str, ) -> str:
    """Returns a fully-qualified project string."""
    return "projects/{project}".format(project=project, )

@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
    """Parse a project path into its component segments."""
    m = re.match(r"^projects/(?P<project>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_location_path(project: str, location: str, ) -> str:
    """Returns a fully-qualified location string."""
    return "projects/{project}/locations/{location}".format(project=project, location=location, )

@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
    """Parse a location path into its component segments."""
    m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    return m.groupdict() if m else {}
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, InterconnectAttachmentsTransport): + # transport is a InterconnectAttachmentsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListInterconnectAttachmentsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of interconnect + attachments. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListInterconnectAttachmentsRequest, dict]): + The request object. A request message for + InterconnectAttachments.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.interconnect_attachments.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListInterconnectAttachmentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListInterconnectAttachmentsRequest): + request = compute.AggregatedListInterconnectAttachmentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteInterconnectAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + interconnect_attachment: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified interconnect attachment. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInterconnectAttachmentRequest, dict]): + The request object. A request message for + InterconnectAttachments.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + interconnect_attachment (str): + Name of the interconnect attachment + to delete. + + This corresponds to the ``interconnect_attachment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, interconnect_attachment]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInterconnectAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteInterconnectAttachmentRequest): + request = compute.DeleteInterconnectAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if interconnect_attachment is not None: + request.interconnect_attachment = interconnect_attachment + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetInterconnectAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + interconnect_attachment: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InterconnectAttachment: + r"""Returns the specified interconnect attachment. + + Args: + request (Union[google.cloud.compute_v1.types.GetInterconnectAttachmentRequest, dict]): + The request object. A request message for + InterconnectAttachments.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + interconnect_attachment (str): + Name of the interconnect attachment + to return. + + This corresponds to the ``interconnect_attachment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.InterconnectAttachment: + Represents an Interconnect Attachment + (VLAN) resource. You can use + Interconnect attachments (VLANS) to + connect your Virtual Private Cloud + networks to your on-premises networks + through an Interconnect. For more + information, read Creating VLAN + Attachments. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, interconnect_attachment]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetInterconnectAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetInterconnectAttachmentRequest): + request = compute.GetInterconnectAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if interconnect_attachment is not None: + request.interconnect_attachment = interconnect_attachment + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertInterconnectAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + interconnect_attachment_resource: compute.InterconnectAttachment = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates an InterconnectAttachment in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertInterconnectAttachmentRequest, dict]): + The request object. A request message for + InterconnectAttachments.Insert. See the method + description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + interconnect_attachment_resource (google.cloud.compute_v1.types.InterconnectAttachment): + The body resource for this request + This corresponds to the ``interconnect_attachment_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, interconnect_attachment_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertInterconnectAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertInterconnectAttachmentRequest): + request = compute.InsertInterconnectAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if interconnect_attachment_resource is not None: + request.interconnect_attachment_resource = interconnect_attachment_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListInterconnectAttachmentsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of interconnect attachments + contained within the specified region. + + Args: + request (Union[google.cloud.compute_v1.types.ListInterconnectAttachmentsRequest, dict]): + The request object. A request message for + InterconnectAttachments.List. See the method description + for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.interconnect_attachments.pagers.ListPager: + Response to the list request, and + contains a list of interconnect + attachments. Iterating over this object + will yield results and resolve + additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInterconnectAttachmentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInterconnectAttachmentsRequest): + request = compute.ListInterconnectAttachmentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchInterconnectAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + interconnect_attachment: str = None, + interconnect_attachment_resource: compute.InterconnectAttachment = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified interconnect attachment with + the data included in the request. This method supports + PATCH semantics and uses the JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchInterconnectAttachmentRequest, dict]): + The request object. A request message for + InterconnectAttachments.Patch. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + interconnect_attachment (str): + Name of the interconnect attachment + to patch. + + This corresponds to the ``interconnect_attachment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ interconnect_attachment_resource (google.cloud.compute_v1.types.InterconnectAttachment): + The body resource for this request + This corresponds to the ``interconnect_attachment_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, interconnect_attachment, interconnect_attachment_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchInterconnectAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.PatchInterconnectAttachmentRequest): + request = compute.PatchInterconnectAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if interconnect_attachment is not None: + request.interconnect_attachment = interconnect_attachment + if interconnect_attachment_resource is not None: + request.interconnect_attachment_resource = interconnect_attachment_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "InterconnectAttachmentsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/pagers.py new file mode 100644 index 000000000..9eda54e5b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InterconnectAttachmentAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.InterconnectAttachmentAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InterconnectAttachmentAggregatedList], + request: compute.AggregatedListInterconnectAttachmentsRequest, + response: compute.InterconnectAttachmentAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListInterconnectAttachmentsRequest): + The initial request object. + response (google.cloud.compute_v1.types.InterconnectAttachmentAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListInterconnectAttachmentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InterconnectAttachmentAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.InterconnectAttachmentsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.InterconnectAttachmentsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InterconnectAttachmentList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.InterconnectAttachmentList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InterconnectAttachmentList], + request: compute.ListInterconnectAttachmentsRequest, + response: compute.InterconnectAttachmentList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInterconnectAttachmentsRequest): + The initial request object. + response (google.cloud.compute_v1.types.InterconnectAttachmentList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListInterconnectAttachmentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InterconnectAttachmentList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InterconnectAttachment]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/__init__.py new file mode 100644 index 000000000..79094e3ae --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import InterconnectAttachmentsTransport +from .rest import InterconnectAttachmentsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[InterconnectAttachmentsTransport]] +_transport_registry['rest'] = InterconnectAttachmentsRestTransport + +__all__ = ( + 'InterconnectAttachmentsTransport', + 'InterconnectAttachmentsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/base.py new file mode 100644 index 000000000..d71d46b97 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/base.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class InterconnectAttachmentsTransport(abc.ABC): + """Abstract transport class for InterconnectAttachments.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListInterconnectAttachmentsRequest], + Union[ + compute.InterconnectAttachmentAggregatedList, + Awaitable[compute.InterconnectAttachmentAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteInterconnectAttachmentRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetInterconnectAttachmentRequest], + Union[ + compute.InterconnectAttachment, + Awaitable[compute.InterconnectAttachment] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertInterconnectAttachmentRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListInterconnectAttachmentsRequest], + Union[ + compute.InterconnectAttachmentList, + 
Awaitable[compute.InterconnectAttachmentList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchInterconnectAttachmentRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'InterconnectAttachmentsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/rest.py new file mode 100644 index 000000000..ea115ca46 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_attachments/transports/rest.py @@ -0,0 +1,772 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import InterconnectAttachmentsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class InterconnectAttachmentsRestTransport(InterconnectAttachmentsTransport): + """REST backend transport for InterconnectAttachments. + + The InterconnectAttachments API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListInterconnectAttachmentsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InterconnectAttachmentAggregatedList: + r"""Call the aggregated list method over HTTP. 
+ + Args: + request (~.compute.AggregatedListInterconnectAttachmentsRequest): + The request object. A request message for + InterconnectAttachments.AggregatedList. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InterconnectAttachmentAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/interconnectAttachments', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListInterconnectAttachmentsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListInterconnectAttachmentsRequest.to_json( + compute.AggregatedListInterconnectAttachmentsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InterconnectAttachmentAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteInterconnectAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteInterconnectAttachmentRequest): + The request object. A request message for + InterconnectAttachments.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{interconnect_attachment}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "interconnect_attachment", + "interconnectAttachment" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteInterconnectAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteInterconnectAttachmentRequest.to_json( + compute.DeleteInterconnectAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetInterconnectAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InterconnectAttachment: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetInterconnectAttachmentRequest): + The request object. A request message for + InterconnectAttachments.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InterconnectAttachment: + Represents an Interconnect Attachment + (VLAN) resource. You can use + Interconnect attachments (VLANS) to + connect your Virtual Private Cloud + networks to your on-premises networks + through an Interconnect. For more + information, read Creating VLAN + Attachments. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{interconnect_attachment}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "interconnect_attachment", + "interconnectAttachment" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetInterconnectAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetInterconnectAttachmentRequest.to_json( + compute.GetInterconnectAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InterconnectAttachment.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertInterconnectAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertInterconnectAttachmentRequest): + The request object. A request message for + InterconnectAttachments.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/interconnectAttachments', + 'body': 'interconnect_attachment_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertInterconnectAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InterconnectAttachment.to_json( + compute.InterconnectAttachment( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertInterconnectAttachmentRequest.to_json( + compute.InsertInterconnectAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListInterconnectAttachmentsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InterconnectAttachmentList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListInterconnectAttachmentsRequest): + The request object. A request message for + InterconnectAttachments.List. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InterconnectAttachmentList: + Response to the list request, and + contains a list of interconnect + attachments. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/interconnectAttachments', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListInterconnectAttachmentsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListInterconnectAttachmentsRequest.to_json( + compute.ListInterconnectAttachmentsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InterconnectAttachmentList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchInterconnectAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchInterconnectAttachmentRequest): + The request object. A request message for + InterconnectAttachments.Patch. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{interconnect_attachment}', + 'body': 'interconnect_attachment_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "interconnect_attachment", + "interconnectAttachment" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchInterconnectAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InterconnectAttachment.to_json( + compute.InterconnectAttachment( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchInterconnectAttachmentRequest.to_json( + compute.PatchInterconnectAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListInterconnectAttachmentsRequest], + compute.InterconnectAttachmentAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteInterconnectAttachmentRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetInterconnectAttachmentRequest], + compute.InterconnectAttachment]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertInterconnectAttachmentRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListInterconnectAttachmentsRequest], + compute.InterconnectAttachmentList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchInterconnectAttachmentRequest], + compute.Operation]: + return self._patch + def close(self): + self._session.close() + + +__all__=( + 'InterconnectAttachmentsRestTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/__init__.py new file mode 100644 index 000000000..5d4e01752 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import InterconnectLocationsClient + +__all__ = ( + 'InterconnectLocationsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/client.py new file mode 100644 index 000000000..f581798f1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/client.py @@ -0,0 +1,517 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.interconnect_locations import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import InterconnectLocationsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import InterconnectLocationsRestTransport + + +class InterconnectLocationsClientMeta(type): + """Metaclass for the InterconnectLocations client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[InterconnectLocationsTransport]] + _transport_registry["rest"] = InterconnectLocationsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[InterconnectLocationsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. 
+ """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class InterconnectLocationsClient(metaclass=InterconnectLocationsClientMeta): + """The InterconnectLocations API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InterconnectLocationsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InterconnectLocationsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> InterconnectLocationsTransport: + """Returns the transport used by the client instance. + + Returns: + InterconnectLocationsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, InterconnectLocationsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the interconnect locations client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, InterconnectLocationsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, InterconnectLocationsTransport): + # transport is a InterconnectLocationsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def get(self, + request: Union[compute.GetInterconnectLocationRequest, dict] = None, + *, + project: str = None, + interconnect_location: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InterconnectLocation: + r"""Returns the details for the specified interconnect + location. Gets a list of available interconnect + locations by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetInterconnectLocationRequest, dict]): + The request object. A request message for + InterconnectLocations.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + interconnect_location (str): + Name of the interconnect location to + return. 
+ + This corresponds to the ``interconnect_location`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.InterconnectLocation: + Represents an Interconnect Attachment + (VLAN) Location resource. You can use + this resource to find location details + about an Interconnect attachment (VLAN). + For more information about interconnect + attachments, read Creating VLAN + Attachments. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, interconnect_location]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetInterconnectLocationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetInterconnectLocationRequest): + request = compute.GetInterconnectLocationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if interconnect_location is not None: + request.interconnect_location = interconnect_location + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListInterconnectLocationsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of interconnect locations + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListInterconnectLocationsRequest, dict]): + The request object. A request message for + InterconnectLocations.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.interconnect_locations.pagers.ListPager: + Response to the list request, and + contains a list of interconnect + locations. Iterating over this object + will yield results and resolve + additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInterconnectLocationsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInterconnectLocationsRequest): + request = compute.ListInterconnectLocationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "InterconnectLocationsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/pagers.py new file mode 100644 index 000000000..c268651ad --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.InterconnectLocationList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.InterconnectLocationList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.InterconnectLocationList], + request: compute.ListInterconnectLocationsRequest, + response: compute.InterconnectLocationList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInterconnectLocationsRequest): + The initial request object. + response (google.cloud.compute_v1.types.InterconnectLocationList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListInterconnectLocationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.InterconnectLocationList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InterconnectLocation]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/transports/__init__.py new file mode 100644 index 000000000..f14e7ea9c --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import InterconnectLocationsTransport +from .rest import InterconnectLocationsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[InterconnectLocationsTransport]] +_transport_registry['rest'] = InterconnectLocationsRestTransport + +__all__ = ( + 'InterconnectLocationsTransport', + 'InterconnectLocationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/transports/base.py new file mode 100644 index 000000000..988fc749d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnect_locations/transports/base.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Advertise the installed package version in the user agent; fall back to a
# bare ClientInfo when the distribution is not pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class InterconnectLocationsTransport(abc.ABC):
    """Abstract transport class for InterconnectLocations.

    Concrete transports (currently REST only) subclass this and implement
    the ``get`` and ``list`` properties.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute.readonly',
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Credentials to attach to requests; if omitted, resolved from
                the environment.
            credentials_file (Optional[str]): A credentials file loadable via
                :func:`google.auth.load_credentials_from_file`. Mutually
                exclusive with ``credentials``.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): Project used for billing and
                quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info sent along as part of the user-agent string.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs
                should be used for service account credentials.
        """
        # Default to port 443 (HTTPS) when the host carries no port.
        self._host = host if ':' in host else host + ':443'

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Keep the caller-supplied scopes (may be None).
        self._scopes = scopes

        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive")

        # Resolve credentials: explicit file first, then explicit object,
        # then the application-default chain.
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id)

        # Service-account credentials prefer self-signed JWTs when the
        # installed google-auth supports it.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials,
                            "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        """Precompute retry/timeout wrappers for every RPC method."""
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (self.get, self.list)
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetInterconnectLocationRequest],
            Union[compute.InterconnectLocation,
                  Awaitable[compute.InterconnectLocation]]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListInterconnectLocationsRequest],
            Union[compute.InterconnectLocationList,
                  Awaitable[compute.InterconnectLocationList]]]:
        raise NotImplementedError()


__all__ = (
    'InterconnectLocationsTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

# Older api-core releases lack _MethodDefault; fall back to a plain object
# sentinel for the retry default.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.types import compute

from .base import InterconnectLocationsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO

# REST transport never touches gRPC, so advertise the requests version
# instead of a gRPC version in the user agent.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class InterconnectLocationsRestTransport(InterconnectLocationsTransport):
    """REST backend transport for InterconnectLocations.

    The InterconnectLocations API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Credentials to attach to requests; if omitted, resolved from
                the environment.
            credentials_file (Optional[str]): A credentials file loadable via
                :func:`google.auth.load_credentials_from_file`. Ignored if
                ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. Ignored if
                ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate to configure a mutual-TLS HTTP channel.
                Ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): Project used for billing and
                quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info sent along as part of the user-agent string.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs
                should be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but "http" can be specified for testing or local
                servers.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_rest(self, request_cls, response_cls, request, http_options,
                   required_fields, timeout, metadata):
        """Shared HTTP plumbing: transcode, build query params, send, decode.

        ``required_fields`` is a list of (snake_case, camelCase) pairs whose
        values must survive into the query string even when they equal the
        proto default.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded = path_template.transcode(http_options, **request_kwargs)

        # Jsonify the query params
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # A required field at its default value is dropped by to_json above;
        # restore it from the transcoded request.
        orig_query_params = transcoded['query_params']
        for snake_name, camel_name in required_fields:
            if snake_name in orig_query_params and camel_name not in query_params:
                query_params[camel_name] = orig_query_params[snake_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, transcoded['method'])(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=transcoded['uri']),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # Raise the appropriate core_exceptions.GoogleAPICallError subclass
        # on HTTP errors.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _get(self,
            request: compute.GetInterconnectLocationRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.InterconnectLocation:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetInterconnectLocationRequest):
                The request object. A request message for
                InterconnectLocations.Get.
            retry (google.api_core.retry.Retry): Which errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with the
                request as metadata.

        Returns:
            ~.compute.InterconnectLocation:
                Represents an Interconnect Attachment (VLAN) Location
                resource.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/interconnectLocations/{interconnect_location}',
            },
        ]
        required_fields = [
            # (snake_case_name, camel_case_name)
            ("interconnect_location", "interconnectLocation"),
            ("project", "project"),
        ]
        return self._call_rest(
            compute.GetInterconnectLocationRequest,
            compute.InterconnectLocation,
            request, http_options, required_fields, timeout, metadata,
        )

    def _list(self,
            request: compute.ListInterconnectLocationsRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.InterconnectLocationList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListInterconnectLocationsRequest):
                The request object. A request message for
                InterconnectLocations.List.
            retry (google.api_core.retry.Retry): Which errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with the
                request as metadata.

        Returns:
            ~.compute.InterconnectLocationList:
                Response to the list request; contains a list of
                interconnect locations.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/interconnectLocations',
            },
        ]
        required_fields = [
            # (snake_case_name, camel_case_name)
            ("project", "project"),
        ]
        return self._call_rest(
            compute.ListInterconnectLocationsRequest,
            compute.InterconnectLocationList,
            request, http_options, required_fields, timeout, metadata,
        )

    @property
    def get(self) -> Callable[
            [compute.GetInterconnectLocationRequest],
            compute.InterconnectLocation]:
        return self._get

    @property
    def list(self) -> Callable[
            [compute.ListInterconnectLocationsRequest],
            compute.InterconnectLocationList]:
        return self._list

    def close(self):
        self._session.close()


__all__ = (
    'InterconnectLocationsRestTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Public surface of the Interconnects service package."""
from .client import InterconnectsClient

__all__ = (
    'InterconnectsClient',
)
class InterconnectsClientMeta(type):
    """Metaclass for the Interconnects client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[InterconnectsTransport]]
    _transport_registry["rest"] = InterconnectsRestTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[InterconnectsTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class InterconnectsClient(metaclass=InterconnectsClientMeta):
    """The Interconnects API."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # FIX: the named groups had been stripped ("(?P[^.]+)" is a regex
        # syntax error); restored the standard GAPIC group names so the
        # pattern compiles and m.groups() yields the four expected parts.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + InterconnectsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> InterconnectsTransport: + """Returns the transport used by the client instance. + + Returns: + InterconnectsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, InterconnectsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the interconnects client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, InterconnectsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, InterconnectsTransport): + # transport is a InterconnectsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteInterconnectRequest, dict] = None, + *, + project: str = None, + interconnect: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified interconnect. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInterconnectRequest, dict]): + The request object. A request message for + Interconnects.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + interconnect (str): + Name of the interconnect to delete. + This corresponds to the ``interconnect`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, interconnect]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInterconnectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteInterconnectRequest): + request = compute.DeleteInterconnectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if interconnect is not None: + request.interconnect = interconnect + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
        # (tail of Interconnects.delete) Done; return the response.
        return response

    def get(self,
            request: Union[compute.GetInterconnectRequest, dict] = None,
            *,
            project: str = None,
            interconnect: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Interconnect:
        r"""Returns the specified interconnect. Get a list of
        available interconnects by making a list() request.

        Args:
            request (Union[google.cloud.compute_v1.types.GetInterconnectRequest, dict]):
                The request object. A request message for
                Interconnects.Get. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            interconnect (str):
                Name of the interconnect to return.
                This corresponds to the ``interconnect`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Interconnect:
                Represents an Interconnect resource.
                An Interconnect resource is a dedicated
                connection between the GCP network and
                your on-premises network. For more
                information, read the Dedicated
                Interconnect Overview.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        # NOTE(review): any([...]) treats empty strings as "not set", so an
        # explicitly empty flattened field does not trigger the ValueError.
        has_flattened_params = any([project, interconnect])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetInterconnectRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetInterconnectRequest):
            request = compute.GetInterconnectRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if project is not None:
            request.project = project
        if interconnect is not None:
            request.interconnect = interconnect

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get_diagnostics(self,
            request: Union[compute.GetDiagnosticsInterconnectRequest, dict] = None,
            *,
            project: str = None,
            interconnect: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.InterconnectsGetDiagnosticsResponse:
        r"""Returns the interconnectDiagnostics for the specified
        interconnect.

        Args:
            request (Union[google.cloud.compute_v1.types.GetDiagnosticsInterconnectRequest, dict]):
                The request object. A request message for
                Interconnects.GetDiagnostics. See the method description
                for details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            interconnect (str):
                Name of the interconnect resource to
                query.

                This corresponds to the ``interconnect`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.InterconnectsGetDiagnosticsResponse:
                Response for the
                InterconnectsGetDiagnosticsRequest.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, interconnect])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.GetDiagnosticsInterconnectRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.GetDiagnosticsInterconnectRequest):
            request = compute.GetDiagnosticsInterconnectRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if project is not None:
            request.project = project
        if interconnect is not None:
            request.interconnect = interconnect

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get_diagnostics]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def insert(self,
            request: Union[compute.InsertInterconnectRequest, dict] = None,
            *,
            project: str = None,
            interconnect_resource: compute.Interconnect = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Creates a Interconnect in the specified project using
        the data included in the request.

        Args:
            request (Union[google.cloud.compute_v1.types.InsertInterconnectRequest, dict]):
                The request object. A request message for
                Interconnects.Insert. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            interconnect_resource (google.cloud.compute_v1.types.Interconnect):
                The body resource for this request
                This corresponds to the ``interconnect_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, interconnect_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.InsertInterconnectRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.InsertInterconnectRequest):
            request = compute.InsertInterconnectRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if project is not None:
            request.project = project
        if interconnect_resource is not None:
            request.interconnect_resource = interconnect_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.insert]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        # NOTE(review): this is a long-running Operation; completion must be
        # polled by the caller — the client does not wait here.
        return response

    def list(self,
            request: Union[compute.ListInterconnectsRequest, dict] = None,
            *,
            project: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListPager:
        r"""Retrieves the list of interconnect available to the
        specified project.

        Args:
            request (Union[google.cloud.compute_v1.types.ListInterconnectsRequest, dict]):
                The request object. A request message for
                Interconnects.List. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.services.interconnects.pagers.ListPager:
                Response to the list request, and
                contains a list of interconnects.
                Iterating over this object will yield
                results and resolve additional pages
                automatically.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.ListInterconnectsRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.ListInterconnectsRequest):
            request = compute.ListInterconnectsRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if project is not None:
            request.project = project

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def patch(self,
            request: Union[compute.PatchInterconnectRequest, dict] = None,
            *,
            project: str = None,
            interconnect: str = None,
            interconnect_resource: compute.Interconnect = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Updates the specified interconnect with the data
        included in the request. This method supports PATCH
        semantics and uses the JSON merge patch format and
        processing rules.

        Args:
            request (Union[google.cloud.compute_v1.types.PatchInterconnectRequest, dict]):
                The request object. A request message for
                Interconnects.Patch. See the method description for
                details.
            project (str):
                Project ID for this request.
                This corresponds to the ``project`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            interconnect (str):
                Name of the interconnect to update.
                This corresponds to the ``interconnect`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            interconnect_resource (google.cloud.compute_v1.types.Interconnect):
                The body resource for this request
                This corresponds to the ``interconnect_resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                [Global](/compute/docs/reference/rest/v1/globalOperations)
                \*
                [Regional](/compute/docs/reference/rest/v1/regionOperations)
                \*
                [Zonal](/compute/docs/reference/rest/v1/zoneOperations)
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the globalOperations
                resource. - For regional operations, use the
                regionOperations resource. - For zonal operations, use
                the zonalOperations resource. For more information, read
                Global, Regional, and Zonal Resources.

        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([project, interconnect, interconnect_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization to avoid making a copy if the user passes
        # in a compute.PatchInterconnectRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, compute.PatchInterconnectRequest):
            request = compute.PatchInterconnectRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if project is not None:
            request.project = project
        if interconnect is not None:
            request.interconnect = interconnect
        if interconnect_resource is not None:
            request.interconnect_resource = interconnect_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.patch]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def __enter__(self):
        # Support use of the client as a context manager.
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        self.transport.close()



# Version metadata for the user-agent header; falls back to a bare
# ClientInfo when the package is not pip-installed (e.g. generated code
# run from source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-compute",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "InterconnectsClient",
)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/pagers.py
new file mode 100644
index 000000000..161df7fc0
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/pagers.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class ListPager:
    """A pager for iterating through ``list`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.InterconnectList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``List`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.compute_v1.types.InterconnectList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., compute.InterconnectList],
            request: compute.ListInterconnectsRequest,
            response: compute.InterconnectList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListInterconnectsRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.InterconnectList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutation during iteration does not
        # affect the caller's object.
        self._request = compute.ListInterconnectsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.InterconnectList]:
        # Lazily fetch successive pages; each page is requested only when
        # the previous one has been consumed.
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[compute.Interconnect]:
        for page in self.pages:
            yield from page.items

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/__init__.py
new file mode 100644
index 000000000..69a153e45
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/__init__.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import InterconnectsTransport
from .rest import InterconnectsRestTransport


# Compile a registry of transports.
# Only the REST transport is generated for this API (no gRPC entry).
_transport_registry = OrderedDict()  # type: Dict[str, Type[InterconnectsTransport]]
_transport_registry['rest'] = InterconnectsRestTransport

__all__ = (
    'InterconnectsTransport',
    'InterconnectsRestTransport',
)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/base.py
new file mode 100644
index 000000000..7d85d78a4
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/base.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth                         # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account              # type: ignore

from google.cloud.compute_v1.types import compute

# Version metadata for the user-agent header; falls back when the package
# is not pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class InterconnectsTransport(abc.ABC):
    """Abstract transport class for Interconnects."""

    # OAuth scopes requested when default credentials are resolved.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr guard keeps this compatible with older google-auth releases.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Keys are the bound transport callables; values add retry/timeout
        # handling and user-agent metadata via gapic_v1.method.wrap_method.
        self._wrapped_methods = {
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_diagnostics: gapic_v1.method.wrap_method(
                self.get_diagnostics,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.patch: gapic_v1.method.wrap_method(
                self.patch,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # Each abstract property below must be implemented by a concrete
    # transport and return the callable for the corresponding RPC.
    @property
    def delete(self) -> Callable[
            [compute.DeleteInterconnectRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetInterconnectRequest],
            Union[
                compute.Interconnect,
                Awaitable[compute.Interconnect]
            ]]:
        raise NotImplementedError()

    @property
    def get_diagnostics(self) -> Callable[
            [compute.GetDiagnosticsInterconnectRequest],
            Union[
                compute.InterconnectsGetDiagnosticsResponse,
                Awaitable[compute.InterconnectsGetDiagnosticsResponse]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertInterconnectRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListInterconnectsRequest],
            Union[
                compute.InterconnectList,
                Awaitable[compute.InterconnectList]
            ]]:
        raise NotImplementedError()

    @property
    def patch(self) ->
 Callable[
            [compute.PatchInterconnectRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()


__all__ = (
    'InterconnectsTransport',
)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/rest.py
new file mode 100644
index 000000000..7ebf00cf5
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/interconnects/transports/rest.py
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

# OptionalRetry falls back for google-api-core versions that predate
# gapic_v1.method._MethodDefault.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#



from google.cloud.compute_v1.types import compute

from .base import InterconnectsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)

class InterconnectsRestTransport(InterconnectsTransport):
    """REST backend transport for Interconnects.

    The Interconnects API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials=None,
            credentials_file: str=None,
            scopes: Sequence[str]=None,
            client_cert_source_for_mtls: Callable[[
                ], Tuple[bytes, bytes]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.

            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # AuthorizedSession refreshes credentials automatically on each call.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _delete(self,
            request: compute.DeleteInterconnectRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteInterconnectRequest):
                The request object. A request message for
                Interconnects.Delete. See the method
                description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource.
 Google Compute Engine
                has three Operation resources: \*
                `Global </compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.

        """

        http_options = [
            {
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/global/interconnects/{interconnect}',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "interconnect",
                "interconnect"
            ),
            (
                "project",
                "project"
            ),
        ]

        request_kwargs = compute.DeleteInterconnectRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(compute.DeleteInterconnectRequest.to_json(
            compute.DeleteInterconnectRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        # NOTE(review): `retry` is accepted but not applied here — retries for
        # REST transports are handled by the wrapped method layer, if at all.
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _get(self,
            request: compute.GetInterconnectRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Interconnect:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetInterconnectRequest):
                The request object. A request message for
                Interconnects.Get. See the method
                description for details.

            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Interconnect:
                Represents an Interconnect resource.
                An Interconnect resource is a dedicated
                connection between the GCP network and
                your on-premises network. For more
                information, read the Dedicated
                Interconnect Overview.
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/interconnects/{interconnect}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "interconnect", + "interconnect" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetInterconnectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetInterconnectRequest.to_json( + compute.GetInterconnectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Interconnect.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_diagnostics(self, + request: compute.GetDiagnosticsInterconnectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InterconnectsGetDiagnosticsResponse: + r"""Call the get diagnostics method over HTTP. + + Args: + request (~.compute.GetDiagnosticsInterconnectRequest): + The request object. A request message for + Interconnects.GetDiagnostics. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InterconnectsGetDiagnosticsResponse: + Response for the + InterconnectsGetDiagnosticsRequest. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/interconnects/{interconnect}/getDiagnostics', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "interconnect", + "interconnect" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetDiagnosticsInterconnectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetDiagnosticsInterconnectRequest.to_json( + compute.GetDiagnosticsInterconnectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InterconnectsGetDiagnosticsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertInterconnectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertInterconnectRequest): + The request object. A request message for + Interconnects.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/interconnects', + 'body': 'interconnect_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertInterconnectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Interconnect.to_json( + compute.Interconnect( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertInterconnectRequest.to_json( + compute.InsertInterconnectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListInterconnectsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InterconnectList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListInterconnectsRequest): + The request object. A request message for + Interconnects.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InterconnectList: + Response to the list request, and + contains a list of interconnects. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/interconnects', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListInterconnectsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListInterconnectsRequest.to_json( + compute.ListInterconnectsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InterconnectList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchInterconnectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchInterconnectRequest): + The request object. A request message for + Interconnects.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/interconnects/{interconnect}', + 'body': 'interconnect_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "interconnect", + "interconnect" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.PatchInterconnectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Interconnect.to_json( + compute.Interconnect( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchInterconnectRequest.to_json( + compute.PatchInterconnectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteInterconnectRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetInterconnectRequest], + compute.Interconnect]: + return self._get + @ property + def get_diagnostics(self) -> Callable[ + [compute.GetDiagnosticsInterconnectRequest], + compute.InterconnectsGetDiagnosticsResponse]: + return self._get_diagnostics + @ property + def insert(self) -> Callable[ + [compute.InsertInterconnectRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListInterconnectsRequest], + compute.InterconnectList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchInterconnectRequest], + compute.Operation]: + return self._patch + def close(self): + self._session.close() + + +__all__=( + 'InterconnectsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/__init__.py new file mode 100644 index 000000000..47df757f2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import LicenseCodesClient + +__all__ = ( + 'LicenseCodesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/client.py new file mode 100644 index 000000000..1f7eca32c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/client.py @@ -0,0 +1,520 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.types import compute +from .transports.base import LicenseCodesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import LicenseCodesRestTransport + + +class LicenseCodesClientMeta(type): + """Metaclass for the LicenseCodes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[LicenseCodesTransport]] + _transport_registry["rest"] = LicenseCodesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[LicenseCodesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+        return next(iter(cls._transport_registry.values()))
+
+
+class LicenseCodesClient(metaclass=LicenseCodesClientMeta):
+    """The LicenseCodes API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            LicenseCodesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+            file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            LicenseCodesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> LicenseCodesTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            LicenseCodesTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, LicenseCodesTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the license codes client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, LicenseCodesTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+                creation failed for any reason.
+        """
+        if isinstance(client_options, dict):
+            client_options = client_options_lib.from_dict(client_options)
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+
+        # Create SSL credentials for mutual TLS if needed.
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, LicenseCodesTransport): + # transport is a LicenseCodesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def get(self, + request: Union[compute.GetLicenseCodeRequest, dict] = None, + *, + project: str = None, + license_code: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.LicenseCode: + r"""Return a specified license code. License codes are mirrored + across all projects that have permissions to read the License + Code. *Caution* This resource is intended for use only by + third-party partners who are creating Cloud Marketplace images. + + Args: + request (Union[google.cloud.compute_v1.types.GetLicenseCodeRequest, dict]): + The request object. A request message for + LicenseCodes.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + license_code (str): + Number corresponding to the License + code resource to return. + + This corresponds to the ``license_code`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.LicenseCode: + Represents a License Code resource. A License Code is a + unique identifier used to represent a license resource. 
+ *Caution* This resource is intended for use only by + third-party partners who are creating Cloud Marketplace + images. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, license_code]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetLicenseCodeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetLicenseCodeRequest): + request = compute.GetLicenseCodeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if license_code is not None: + request.license_code = license_code + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsLicenseCodeRequest, dict] = None, + *, + project: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the specified resource. + *Caution* This resource is intended for use only by third-party + partners who are creating Cloud Marketplace images. 
+ + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsLicenseCodeRequest, dict]): + The request object. A request message for + LicenseCodes.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsLicenseCodeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.TestIamPermissionsLicenseCodeRequest): + request = compute.TestIamPermissionsLicenseCodeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "LicenseCodesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/__init__.py new file mode 100644 index 000000000..dbaf8d76c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import LicenseCodesTransport +from .rest import LicenseCodesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[LicenseCodesTransport]] +_transport_registry['rest'] = LicenseCodesRestTransport + +__all__ = ( + 'LicenseCodesTransport', + 'LicenseCodesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/base.py new file mode 100644 index 000000000..50132551d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/base.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class LicenseCodesTransport(abc.ABC): + """Abstract transport class for LicenseCodes.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+ This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetLicenseCodeRequest], + Union[ + compute.LicenseCode, + Awaitable[compute.LicenseCode] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsLicenseCodeRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'LicenseCodesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/rest.py new file mode 100644 index 000000000..b94b9e17e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/license_codes/transports/rest.py @@ -0,0 +1,328 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + 
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import LicenseCodesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class LicenseCodesRestTransport(LicenseCodesTransport): + """REST backend transport for LicenseCodes. + + The LicenseCodes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _get(self, + request: compute.GetLicenseCodeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.LicenseCode: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetLicenseCodeRequest): + The request object. A request message for + LicenseCodes.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.LicenseCode: + Represents a License Code resource. A License Code is a + unique identifier used to represent a license resource. + *Caution* This resource is intended for use only by + third-party partners who are creating Cloud Marketplace + images. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/licenseCodes/{license_code}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "license_code", + "licenseCode" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetLicenseCodeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetLicenseCodeRequest.to_json( + compute.GetLicenseCodeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.LicenseCode.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsLicenseCodeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsLicenseCodeRequest): + The request object. A request message for + LicenseCodes.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/licenseCodes/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsLicenseCodeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsLicenseCodeRequest.to_json( + compute.TestIamPermissionsLicenseCodeRequest(transcoded_request['query_params']), 
+ including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def get(self) -> Callable[ + [compute.GetLicenseCodeRequest], + compute.LicenseCode]: + return self._get + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsLicenseCodeRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'LicenseCodesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/__init__.py new file mode 100644 index 000000000..b02047570 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache 
License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import LicensesClient + +__all__ = ( + 'LicensesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/client.py new file mode 100644 index 000000000..ec5070a74 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/client.py @@ -0,0 +1,1020 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.licenses import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import LicensesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import LicensesRestTransport + + +class LicensesClientMeta(type): + """Metaclass for the Licenses client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[LicensesTransport]] + _transport_registry["rest"] = LicensesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[LicensesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+        return next(iter(cls._transport_registry.values()))
+
+
+class LicensesClient(metaclass=LicensesClientMeta):
+    """The Licenses API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            LicensesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            LicensesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> LicensesTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            LicensesTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, LicensesTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the licenses client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, LicensesTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, LicensesTransport): + # transport is a LicensesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteLicenseRequest, dict] = None, + *, + project: str = None, + license_: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified license. *Caution* This resource is + intended for use only by third-party partners who are creating + Cloud Marketplace images. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteLicenseRequest, dict]): + The request object. A request message for + Licenses.Delete. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + license_ (str): + Name of the license resource to + delete. + + This corresponds to the ``license_`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, license_]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteLicenseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteLicenseRequest): + request = compute.DeleteLicenseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if license_ is not None: + request.license_ = license_ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetLicenseRequest, dict] = None, + *, + project: str = None, + license_: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.License: + r"""Returns the specified License resource. *Caution* This resource + is intended for use only by third-party partners who are + creating Cloud Marketplace images. + + Args: + request (Union[google.cloud.compute_v1.types.GetLicenseRequest, dict]): + The request object. A request message for Licenses.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + license_ (str): + Name of the License resource to + return. + + This corresponds to the ``license_`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.License: + Represents a License resource. A License represents + billing and aggregate usage data for public and + marketplace images. *Caution* This resource is intended + for use only by third-party partners who are creating + Cloud Marketplace images. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, license_]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetLicenseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetLicenseRequest): + request = compute.GetLicenseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if license_ is not None: + request.license_ = license_ + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyLicenseRequest, dict] = None, + *, + project: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be empty if + no such policy or resource exists. *Caution* This resource is + intended for use only by third-party partners who are creating + Cloud Marketplace images. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyLicenseRequest, dict]): + The request object. A request message for + Licenses.GetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyLicenseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicyLicenseRequest): + request = compute.GetIamPolicyLicenseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertLicenseRequest, dict] = None, + *, + project: str = None, + license_resource: compute.License = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Create a License resource in the specified project. *Caution* + This resource is intended for use only by third-party partners + who are creating Cloud Marketplace images. + + Args: + request (Union[google.cloud.compute_v1.types.InsertLicenseRequest, dict]): + The request object. A request message for + Licenses.Insert. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + license_resource (google.cloud.compute_v1.types.License): + The body resource for this request + This corresponds to the ``license_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, license_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertLicenseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertLicenseRequest): + request = compute.InsertLicenseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if license_resource is not None: + request.license_resource = license_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListLicensesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of licenses available in the specified + project. This method does not get any licenses that belong to + other projects, including licenses attached to + publicly-available images, like Debian 9. If you want to get a + list of publicly-available licenses, use this method to make a + request to the respective image project, such as debian-cloud or + windows-cloud. *Caution* This resource is intended for use only + by third-party partners who are creating Cloud Marketplace + images. + + Args: + request (Union[google.cloud.compute_v1.types.ListLicensesRequest, dict]): + The request object. A request message for Licenses.List. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.licenses.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListLicensesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListLicensesRequest): + request = compute.ListLicensesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyLicenseRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_policy_request_resource: compute.GlobalSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified resource. + Replaces any existing policy. *Caution* This resource is + intended for use only by third-party partners who are creating + Cloud Marketplace images. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyLicenseRequest, dict]): + The request object. A request message for + Licenses.SetIamPolicy. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + This corresponds to the ``global_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, global_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyLicenseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetIamPolicyLicenseRequest): + request = compute.SetIamPolicyLicenseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_policy_request_resource is not None: + request.global_set_policy_request_resource = global_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsLicenseRequest, dict] = None, + *, + project: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the specified resource. + *Caution* This resource is intended for use only by third-party + partners who are creating Cloud Marketplace images. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsLicenseRequest, dict]): + The request object. A request message for + Licenses.TestIamPermissions. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsLicenseRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsLicenseRequest): + request = compute.TestIamPermissionsLicenseRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "LicensesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/pagers.py new file mode 100644 index 000000000..7055c8dc9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.LicensesListResponse` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.LicensesListResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.LicensesListResponse], + request: compute.ListLicensesRequest, + response: compute.LicensesListResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListLicensesRequest): + The initial request object. + response (google.cloud.compute_v1.types.LicensesListResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListLicensesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.LicensesListResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.License]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/__init__.py new file mode 100644 index 000000000..27acd5d9a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import LicensesTransport +from .rest import LicensesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[LicensesTransport]] +_transport_registry['rest'] = LicensesRestTransport + +__all__ = ( + 'LicensesTransport', + 'LicensesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/base.py new file mode 100644 index 000000000..f20b13e7b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/base.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class LicensesTransport(abc.ABC): + """Abstract transport class for Licenses.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteLicenseRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetLicenseRequest], + Union[ + compute.License, + Awaitable[compute.License] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyLicenseRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertLicenseRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListLicensesRequest], + Union[ + compute.LicensesListResponse, + Awaitable[compute.LicensesListResponse] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyLicenseRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsLicenseRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'LicensesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/rest.py new file mode 100644 index 000000000..ce750e33d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/licenses/transports/rest.py @@ -0,0 +1,916 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # 
type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import LicensesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class LicensesRestTransport(LicensesTransport): + """REST backend transport for Licenses. + + The Licenses API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+
+    It sends JSON representations of protocol buffers over HTTP/1.1
+    """
+    def __init__(self, *,
+            host: str = 'compute.googleapis.com',
+            credentials: ga_credentials.Credentials=None,
+            credentials_file: str=None,
+            scopes: Sequence[str]=None,
+            client_cert_source_for_mtls: Callable[[
+                ], Tuple[bytes, bytes]]=None,
+            quota_project_id: Optional[str]=None,
+            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool]=False,
+            url_scheme: str='https',
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+                certificate to configure mutual TLS HTTP channel. It is ignored
+                if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                    "https", but for testing or local servers,
+                    "http" can be specified.
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteLicenseRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteLicenseRequest): + The request object. A request message for + Licenses.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/licenses/{license_}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "license_", + "license" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteLicenseRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteLicenseRequest.to_json( + compute.DeleteLicenseRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetLicenseRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.License: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetLicenseRequest): + The request object. A request message for Licenses.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.License: + Represents a License resource. A License represents + billing and aggregate usage data for public and + marketplace images. *Caution* This resource is intended + for use only by third-party partners who are creating + Cloud Marketplace images. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/licenses/{license_}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "license_", + "license" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetLicenseRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetLicenseRequest.to_json( + compute.GetLicenseRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.License.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyLicenseRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyLicenseRequest): + The request object. A request message for + Licenses.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/licenses/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyLicenseRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyLicenseRequest.to_json( + compute.GetIamPolicyLicenseRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertLicenseRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertLicenseRequest): + The request object. A request message for + Licenses.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/licenses', + 'body': 'license_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertLicenseRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.License.to_json( + compute.License( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertLicenseRequest.to_json( + compute.InsertLicenseRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListLicensesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.LicensesListResponse: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListLicensesRequest): + The request object. A request message for Licenses.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.LicensesListResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/licenses', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListLicensesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListLicensesRequest.to_json( + compute.ListLicensesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.LicensesListResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyLicenseRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyLicenseRequest): + The request object. A request message for + Licenses.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/licenses/{resource}/setIamPolicy', + 'body': 'global_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyLicenseRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetPolicyRequest.to_json( + compute.GlobalSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyLicenseRequest.to_json( + compute.SetIamPolicyLicenseRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsLicenseRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsLicenseRequest): + The request object. A request message for + Licenses.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/licenses/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsLicenseRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsLicenseRequest.to_json( + compute.TestIamPermissionsLicenseRequest(transcoded_request['query_params']), + 
including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteLicenseRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetLicenseRequest], + compute.License]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyLicenseRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertLicenseRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListLicensesRequest], + compute.LicensesListResponse]: + return self._list + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyLicenseRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def 
test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsLicenseRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'LicensesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/__init__.py new file mode 100644 index 000000000..d5641bcbc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import MachineTypesClient + +__all__ = ( + 'MachineTypesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/client.py new file mode 100644 index 000000000..d4524cc05 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/client.py @@ -0,0 +1,607 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.machine_types import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import MachineTypesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import MachineTypesRestTransport + + +class MachineTypesClientMeta(type): + """Metaclass for the MachineTypes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[MachineTypesTransport]] + _transport_registry["rest"] = MachineTypesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[MachineTypesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class MachineTypesClient(metaclass=MachineTypesClientMeta): + """The MachineTypes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MachineTypesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + MachineTypesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> MachineTypesTransport: + """Returns the transport used by the client instance. + + Returns: + MachineTypesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, MachineTypesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the machine types client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, MachineTypesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, MachineTypesTransport): + # transport is a MachineTypesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListMachineTypesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of machine types. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListMachineTypesRequest, dict]): + The request object. A request message for + MachineTypes.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.machine_types.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListMachineTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListMachineTypesRequest): + request = compute.AggregatedListMachineTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetMachineTypeRequest, dict] = None, + *, + project: str = None, + zone: str = None, + machine_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.MachineType: + r"""Returns the specified machine type. 
Gets a list of + available machine types by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetMachineTypeRequest, dict]): + The request object. A request message for + MachineTypes.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + machine_type (str): + Name of the machine type to return. + This corresponds to the ``machine_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.MachineType: + Represents a Machine Type resource. + You can use specific machine types for + your VM instances based on performance + and pricing requirements. For more + information, read Machine Types. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, machine_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetMachineTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetMachineTypeRequest): + request = compute.GetMachineTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if machine_type is not None: + request.machine_type = machine_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListMachineTypesRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of machine types available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListMachineTypesRequest, dict]): + The request object. A request message for + MachineTypes.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.machine_types.pagers.ListPager: + Contains a list of machine types. 
+ Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListMachineTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListMachineTypesRequest): + request = compute.ListMachineTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "MachineTypesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/pagers.py new file mode 100644 index 000000000..d32b3cc53 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.MachineTypeAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.MachineTypeAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.MachineTypeAggregatedList], + request: compute.AggregatedListMachineTypesRequest, + response: compute.MachineTypeAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListMachineTypesRequest): + The initial request object. + response (google.cloud.compute_v1.types.MachineTypeAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListMachineTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.MachineTypeAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.MachineTypesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.MachineTypesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.MachineTypeList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.MachineTypeList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.MachineTypeList], + request: compute.ListMachineTypesRequest, + response: compute.MachineTypeList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListMachineTypesRequest): + The initial request object. + response (google.cloud.compute_v1.types.MachineTypeList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListMachineTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.MachineTypeList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.MachineType]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/transports/__init__.py new file mode 100644 index 000000000..bb33478a4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import MachineTypesTransport +from .rest import MachineTypesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[MachineTypesTransport]] +_transport_registry['rest'] = MachineTypesRestTransport + +__all__ = ( + 'MachineTypesTransport', + 'MachineTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/transports/base.py new file mode 100644 index 000000000..1aa02d321 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/machine_types/transports/base.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Build the default client info from the installed distribution's version;
# fall back to an empty ClientInfo when the package is not pip-installed
# (e.g. code generated and run in place).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class MachineTypesTransport(abc.ABC):
    """Abstract transport class for MachineTypes."""

    # OAuth scopes used as defaults when the caller supplies none.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute.readonly',
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Each RPC is wrapped with retry/timeout plumbing and user-agent
        # metadata from client_info; no default timeouts are configured here.
        self._wrapped_methods = {
            self.aggregated_list: gapic_v1.method.wrap_method(
                self.aggregated_list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListMachineTypesRequest],
            Union[
                compute.MachineTypeAggregatedList,
                Awaitable[compute.MachineTypeAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetMachineTypeRequest],
            Union[
                compute.MachineType,
                Awaitable[compute.MachineType]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListMachineTypesRequest],
            Union[
                compute.MachineTypeList,
                Awaitable[compute.MachineTypeList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'MachineTypesTransport',
)
as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import MachineTypesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class MachineTypesRestTransport(MachineTypesTransport): + """REST backend transport for MachineTypes. + + The MachineTypes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
class MachineTypesRestTransport(MachineTypesTransport):
    """REST backend transport for MachineTypes.

    The MachineTypes API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers, "http" can be
                specified.  NOTE(review): the scheme is currently still
                hard-coded to "https" when the request URL is built below —
                unchanged from the generated code.
        """
        # Run the base constructor.  Fix: credentials_file, scopes and
        # quota_project_id were previously accepted and documented but never
        # forwarded, so file-based credentials and custom scopes were
        # silently ignored.
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_rest(self, request_cls, response_cls, http_options,
                   required_fields, request, timeout, metadata):
        """Shared request pipeline for all MachineTypes REST methods.

        Transcodes ``request`` against ``http_options``, issues the HTTP
        call on the authorized session, raises the mapped
        ``GoogleAPICallError`` subclass on a >=400 status, and decodes the
        body into ``response_cls``.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # TODO: replace with proper scheme configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the decoded response.
        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _aggregated_list(self,
            request: compute.AggregatedListMachineTypesRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.MachineTypeAggregatedList:
        r"""Call the aggregated list method over HTTP.

        Args:
            request (~.compute.AggregatedListMachineTypesRequest):
                The request object. A request message for
                MachineTypes.AggregatedList. See the method description
                for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.  NOTE(review): not applied by this
                transport — unchanged from the generated code.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.MachineTypeAggregatedList:

        """
        return self._call_rest(
            compute.AggregatedListMachineTypesRequest,
            compute.MachineTypeAggregatedList,
            [{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/aggregated/machineTypes',
            }],
            # (snake_case_name, camel_case_name)
            [("project", "project")],
            request, timeout, metadata,
        )

    def _get(self,
            request: compute.GetMachineTypeRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.MachineType:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetMachineTypeRequest):
                The request object. A request message for
                MachineTypes.Get. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.  NOTE(review): not applied by this
                transport — unchanged from the generated code.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.MachineType:
                Represents a Machine Type resource.
                You can use specific machine types for
                your VM instances based on performance
                and pricing requirements. For more
                information, read Machine Types.

        """
        return self._call_rest(
            compute.GetMachineTypeRequest,
            compute.MachineType,
            [{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/machineTypes/{machine_type}',
            }],
            # (snake_case_name, camel_case_name)
            [
                ("machine_type", "machineType"),
                ("project", "project"),
                ("zone", "zone"),
            ],
            request, timeout, metadata,
        )

    def _list(self,
            request: compute.ListMachineTypesRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.MachineTypeList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListMachineTypesRequest):
                The request object. A request message for
                MachineTypes.List. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.  NOTE(review): not applied by this
                transport — unchanged from the generated code.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.MachineTypeList:
                Contains a list of machine types.
        """
        return self._call_rest(
            compute.ListMachineTypesRequest,
            compute.MachineTypeList,
            [{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/zones/{zone}/machineTypes',
            }],
            # (snake_case_name, camel_case_name)
            [
                ("project", "project"),
                ("zone", "zone"),
            ],
            request, timeout, metadata,
        )

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListMachineTypesRequest],
            compute.MachineTypeAggregatedList]:
        return self._aggregated_list

    @property
    def get(self) -> Callable[
            [compute.GetMachineTypeRequest],
            compute.MachineType]:
        return self._get

    @property
    def list(self) -> Callable[
            [compute.ListMachineTypesRequest],
            compute.MachineTypeList]:
        return self._list

    def close(self):
        self._session.close()


__all__ = (
    'MachineTypesRestTransport',
)
+# +from .client import NetworkEndpointGroupsClient + +__all__ = ( + 'NetworkEndpointGroupsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/client.py new file mode 100644 index 000000000..b33f8b3e8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/client.py @@ -0,0 +1,1246 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore

# Older api-core versions do not expose gapic_v1.method._MethodDefault;
# fall back to a plain object sentinel in that case.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.services.network_endpoint_groups import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import NetworkEndpointGroupsTransport, DEFAULT_CLIENT_INFO
from .transports.rest import NetworkEndpointGroupsRestTransport


class NetworkEndpointGroupsClientMeta(type):
    """Metaclass for the NetworkEndpointGroups client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[NetworkEndpointGroupsTransport]]
    _transport_registry["rest"] = NetworkEndpointGroupsRestTransport

    def get_transport_class(cls,
            label: Optional[str] = None,
            ) -> Type[NetworkEndpointGroupsTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class NetworkEndpointGroupsClient(metaclass=NetworkEndpointGroupsClientMeta):
    """The NetworkEndpointGroups API."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # Fix: the named groups were lost during extraction, leaving an
        # invalid pattern; names restored to match the unpacking below.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com domain: return as-is.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
            info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            NetworkEndpointGroupsClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NetworkEndpointGroupsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> NetworkEndpointGroupsTransport: + """Returns the transport used by the client instance. + + Returns: + NetworkEndpointGroupsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, NetworkEndpointGroupsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the network endpoint groups client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, NetworkEndpointGroupsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, NetworkEndpointGroupsTransport): + # transport is a NetworkEndpointGroupsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListNetworkEndpointGroupsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of network endpoint groups and + sorts them by zone. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListNetworkEndpointGroupsRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.network_endpoint_groups.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListNetworkEndpointGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListNetworkEndpointGroupsRequest): + request = compute.AggregatedListNetworkEndpointGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def attach_network_endpoints(self, + request: Union[compute.AttachNetworkEndpointsNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + network_endpoint_group: str = None, + network_endpoint_groups_attach_endpoints_request_resource: compute.NetworkEndpointGroupsAttachEndpointsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Attach a list of network endpoints to the specified + network endpoint group. + + Args: + request (Union[google.cloud.compute_v1.types.AttachNetworkEndpointsNetworkEndpointGroupRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.AttachNetworkEndpoints. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group where you are attaching network + endpoints to. It should comply with + RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_groups_attach_endpoints_request_resource (google.cloud.compute_v1.types.NetworkEndpointGroupsAttachEndpointsRequest): + The body resource for this request + This corresponds to the ``network_endpoint_groups_attach_endpoints_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, network_endpoint_group, network_endpoint_groups_attach_endpoints_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AttachNetworkEndpointsNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.AttachNetworkEndpointsNetworkEndpointGroupRequest): + request = compute.AttachNetworkEndpointsNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + if network_endpoint_groups_attach_endpoints_request_resource is not None: + request.network_endpoint_groups_attach_endpoints_request_resource = network_endpoint_groups_attach_endpoints_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.attach_network_endpoints] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + network_endpoint_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified network endpoint group. The + network endpoints in the NEG and the VM instances they + belong to are not terminated when the NEG is deleted. + Note that the NEG cannot be deleted if there are backend + services referencing it. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteNetworkEndpointGroupRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ zone (str): + The name of the zone where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group to delete. It should comply with + RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, network_endpoint_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteNetworkEndpointGroupRequest): + request = compute.DeleteNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def detach_network_endpoints(self, + request: Union[compute.DetachNetworkEndpointsNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + network_endpoint_group: str = None, + network_endpoint_groups_detach_endpoints_request_resource: compute.NetworkEndpointGroupsDetachEndpointsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Detach a list of network endpoints from the specified + network endpoint group. + + Args: + request (Union[google.cloud.compute_v1.types.DetachNetworkEndpointsNetworkEndpointGroupRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.DetachNetworkEndpoints. 
See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group where you are removing network + endpoints. It should comply with + RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_groups_detach_endpoints_request_resource (google.cloud.compute_v1.types.NetworkEndpointGroupsDetachEndpointsRequest): + The body resource for this request + This corresponds to the ``network_endpoint_groups_detach_endpoints_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, network_endpoint_group, network_endpoint_groups_detach_endpoints_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DetachNetworkEndpointsNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DetachNetworkEndpointsNetworkEndpointGroupRequest): + request = compute.DetachNetworkEndpointsNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + if network_endpoint_groups_detach_endpoints_request_resource is not None: + request.network_endpoint_groups_detach_endpoints_request_resource = network_endpoint_groups_detach_endpoints_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.detach_network_endpoints] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + network_endpoint_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NetworkEndpointGroup: + r"""Returns the specified network endpoint group. Gets a + list of available network endpoint groups by making a + list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetNetworkEndpointGroupRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group. It should comply with RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.NetworkEndpointGroup: + Represents a collection of network + endpoints. A network endpoint group + (NEG) defines how a set of endpoints + should be reached, whether they are + reachable, and where they are located. 
+ For more information about using NEGs, + see Setting up external HTTP(S) Load + Balancing with internet NEGs, Setting up + zonal NEGs, or Setting up external + HTTP(S) Load Balancing with serverless + NEGs. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, network_endpoint_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetNetworkEndpointGroupRequest): + request = compute.GetNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + network_endpoint_group_resource: compute.NetworkEndpointGroup = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a network endpoint group in the specified + project using the parameters that are included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertNetworkEndpointGroupRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where you want + to create the network endpoint group. It + should comply with RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group_resource (google.cloud.compute_v1.types.NetworkEndpointGroup): + The body resource for this request + This corresponds to the ``network_endpoint_group_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, network_endpoint_group_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertNetworkEndpointGroupRequest): + request = compute.InsertNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if network_endpoint_group_resource is not None: + request.network_endpoint_group_resource = network_endpoint_group_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListNetworkEndpointGroupsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of network endpoint groups that + are located in the specified project and zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListNetworkEndpointGroupsRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.network_endpoint_groups.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNetworkEndpointGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNetworkEndpointGroupsRequest): + request = compute.ListNetworkEndpointGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_network_endpoints(self, + request: Union[compute.ListNetworkEndpointsNetworkEndpointGroupsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + network_endpoint_group: str = None, + network_endpoint_groups_list_endpoints_request_resource: compute.NetworkEndpointGroupsListEndpointsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNetworkEndpointsPager: + r"""Lists the network endpoints in the specified network + endpoint group. 
+ + Args: + request (Union[google.cloud.compute_v1.types.ListNetworkEndpointsNetworkEndpointGroupsRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.ListNetworkEndpoints. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group from which you want to generate a + list of included network endpoints. It + should comply with RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_groups_list_endpoints_request_resource (google.cloud.compute_v1.types.NetworkEndpointGroupsListEndpointsRequest): + The body resource for this request + This corresponds to the ``network_endpoint_groups_list_endpoints_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.network_endpoint_groups.pagers.ListNetworkEndpointsPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, network_endpoint_group, network_endpoint_groups_list_endpoints_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNetworkEndpointsNetworkEndpointGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNetworkEndpointsNetworkEndpointGroupsRequest): + request = compute.ListNetworkEndpointsNetworkEndpointGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + if network_endpoint_groups_list_endpoints_request_resource is not None: + request.network_endpoint_groups_list_endpoints_request_resource = network_endpoint_groups_list_endpoints_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_network_endpoints] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNetworkEndpointsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsNetworkEndpointGroupRequest, dict]): + The request object. A request message for + NetworkEndpointGroups.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsNetworkEndpointGroupRequest): + request = compute.TestIamPermissionsNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "NetworkEndpointGroupsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/pagers.py new file mode 100644 index 000000000..2b87d51f8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/pagers.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NetworkEndpointGroupAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.NetworkEndpointGroupAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NetworkEndpointGroupAggregatedList], + request: compute.AggregatedListNetworkEndpointGroupsRequest, + response: compute.NetworkEndpointGroupAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListNetworkEndpointGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NetworkEndpointGroupAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListNetworkEndpointGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NetworkEndpointGroupAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.NetworkEndpointGroupsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.NetworkEndpointGroupsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NetworkEndpointGroupList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NetworkEndpointGroupList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NetworkEndpointGroupList], + request: compute.ListNetworkEndpointGroupsRequest, + response: compute.NetworkEndpointGroupList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListNetworkEndpointGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NetworkEndpointGroupList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListNetworkEndpointGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NetworkEndpointGroupList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NetworkEndpointGroup]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListNetworkEndpointsPager: + """A pager for iterating through ``list_network_endpoints`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NetworkEndpointGroupsListNetworkEndpoints` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNetworkEndpoints`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NetworkEndpointGroupsListNetworkEndpoints` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NetworkEndpointGroupsListNetworkEndpoints], + request: compute.ListNetworkEndpointsNetworkEndpointGroupsRequest, + response: compute.NetworkEndpointGroupsListNetworkEndpoints, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.compute_v1.types.ListNetworkEndpointsNetworkEndpointGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NetworkEndpointGroupsListNetworkEndpoints): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListNetworkEndpointsNetworkEndpointGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NetworkEndpointGroupsListNetworkEndpoints]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NetworkEndpointWithHealthStatus]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/transports/__init__.py new file mode 100644 index 000000000..ced81ffb8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/network_endpoint_groups/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import NetworkEndpointGroupsTransport
from .rest import NetworkEndpointGroupsRestTransport


# Registry mapping each transport name to its implementation class;
# REST is the only transport generated for this API surface.
_transport_registry = OrderedDict(
    [('rest', NetworkEndpointGroupsRestTransport)],
)  # type: Dict[str, Type[NetworkEndpointGroupsTransport]]

__all__ = (
    'NetworkEndpointGroupsTransport',
    'NetworkEndpointGroupsRestTransport',
)
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Derive the client-info (user-agent) version from the installed
# distribution; fall back to an unversioned ClientInfo when the package is
# run straight from a source tree without being pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class NetworkEndpointGroupsTransport(abc.ABC):
    """Abstract transport class for NetworkEndpointGroups.

    Concrete subclasses implement the RPC properties below; this base class
    only resolves credentials and precomputes retry/timeout-wrapped methods.
    """

    # OAuth scopes requested when no explicit ``scopes`` are supplied.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )

        elif credentials is None:
            # Application Default Credentials, scoped per ``scopes_kwargs``.
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr() guard keeps compatibility with older google-auth
        # releases that lack with_always_use_jwt_access().
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC property is wrapped once
        # with retry/timeout/error handling so per-call dispatch is a plain
        # dict lookup.
        self._wrapped_methods = {
            self.aggregated_list: gapic_v1.method.wrap_method(
                self.aggregated_list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.attach_network_endpoints: gapic_v1.method.wrap_method(
                self.attach_network_endpoints,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.detach_network_endpoints: gapic_v1.method.wrap_method(
                self.detach_network_endpoints,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list_network_endpoints: gapic_v1.method.wrap_method(
                self.list_network_endpoints,
                default_timeout=None,
                client_info=client_info,
            ),
            self.test_iam_permissions: gapic_v1.method.wrap_method(
                self.test_iam_permissions,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # Each property below is an abstract RPC handle; concrete transports
    # return a callable (sync returns the message, async an Awaitable).
    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListNetworkEndpointGroupsRequest],
            Union[
                compute.NetworkEndpointGroupAggregatedList,
                Awaitable[compute.NetworkEndpointGroupAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def attach_network_endpoints(self) -> Callable[
            [compute.AttachNetworkEndpointsNetworkEndpointGroupRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteNetworkEndpointGroupRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def detach_network_endpoints(self) -> Callable[
            [compute.DetachNetworkEndpointsNetworkEndpointGroupRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetNetworkEndpointGroupRequest],
            Union[
                compute.NetworkEndpointGroup,
                Awaitable[compute.NetworkEndpointGroup]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertNetworkEndpointGroupRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListNetworkEndpointGroupsRequest],
            Union[
                compute.NetworkEndpointGroupList,
                Awaitable[compute.NetworkEndpointGroupList]
            ]]:
        raise NotImplementedError()

    @property
    def list_network_endpoints(self) -> Callable[
            [compute.ListNetworkEndpointsNetworkEndpointGroupsRequest],
            Union[
                compute.NetworkEndpointGroupsListNetworkEndpoints,
                Awaitable[compute.NetworkEndpointGroupsListNetworkEndpoints]
            ]]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(self) -> Callable[
            [compute.TestIamPermissionsNetworkEndpointGroupRequest],
            Union[
                compute.TestPermissionsResponse,
                Awaitable[compute.TestPermissionsResponse]
            ]]:
        raise NotImplementedError()


__all__ = (
    'NetworkEndpointGroupsTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

# ``_MethodDefault`` only exists in newer google-api-core releases; fall
# back to a plain ``object`` sentinel type on older versions.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import NetworkEndpointGroupsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class NetworkEndpointGroupsRestTransport(NetworkEndpointGroupsTransport): + """REST backend transport for NetworkEndpointGroups. + + The NetworkEndpointGroups API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. 
It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListNetworkEndpointGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroupAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListNetworkEndpointGroupsRequest): + The request object. A request message for + NetworkEndpointGroups.AggregatedList. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkEndpointGroupAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/networkEndpointGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListNetworkEndpointGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListNetworkEndpointGroupsRequest.to_json( + compute.AggregatedListNetworkEndpointGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroupAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _attach_network_endpoints(self, + request: compute.AttachNetworkEndpointsNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the attach network endpoints method over HTTP. + + Args: + request (~.compute.AttachNetworkEndpointsNetworkEndpointGroupRequest): + The request object. A request message for + NetworkEndpointGroups.AttachNetworkEndpoints. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}/attachNetworkEndpoints', + 'body': 'network_endpoint_groups_attach_endpoints_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.AttachNetworkEndpointsNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworkEndpointGroupsAttachEndpointsRequest.to_json( + compute.NetworkEndpointGroupsAttachEndpointsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AttachNetworkEndpointsNetworkEndpointGroupRequest.to_json( + compute.AttachNetworkEndpointsNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteNetworkEndpointGroupRequest): + The request object. A request message for + NetworkEndpointGroups.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteNetworkEndpointGroupRequest.to_json( + compute.DeleteNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _detach_network_endpoints(self, + request: compute.DetachNetworkEndpointsNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the detach network endpoints method over HTTP. + + Args: + request (~.compute.DetachNetworkEndpointsNetworkEndpointGroupRequest): + The request object. A request message for + NetworkEndpointGroups.DetachNetworkEndpoints. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}/detachNetworkEndpoints', + 'body': 'network_endpoint_groups_detach_endpoints_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DetachNetworkEndpointsNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworkEndpointGroupsDetachEndpointsRequest.to_json( + compute.NetworkEndpointGroupsDetachEndpointsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DetachNetworkEndpointsNetworkEndpointGroupRequest.to_json( + compute.DetachNetworkEndpointsNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroup: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetNetworkEndpointGroupRequest): + The request object. A request message for + NetworkEndpointGroups.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkEndpointGroup: + Represents a collection of network + endpoints. A network endpoint group + (NEG) defines how a set of endpoints + should be reached, whether they are + reachable, and where they are located. 
+ For more information about using NEGs, + see Setting up external HTTP(S) Load + Balancing with internet NEGs, Setting up + zonal NEGs, or Setting up external + HTTP(S) Load Balancing with serverless + NEGs. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetNetworkEndpointGroupRequest.to_json( + compute.GetNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroup.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertNetworkEndpointGroupRequest): + The request object. A request message for + NetworkEndpointGroups.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups', + 'body': 'network_endpoint_group_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworkEndpointGroup.to_json( + compute.NetworkEndpointGroup( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertNetworkEndpointGroupRequest.to_json( + compute.InsertNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListNetworkEndpointGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroupList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListNetworkEndpointGroupsRequest): + The request object. A request message for + NetworkEndpointGroups.List. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkEndpointGroupList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListNetworkEndpointGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNetworkEndpointGroupsRequest.to_json( + compute.ListNetworkEndpointGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroupList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_network_endpoints(self, + request: compute.ListNetworkEndpointsNetworkEndpointGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroupsListNetworkEndpoints: + r"""Call the list network endpoints method over HTTP. + + Args: + request (~.compute.ListNetworkEndpointsNetworkEndpointGroupsRequest): + The request object. A request message for + NetworkEndpointGroups.ListNetworkEndpoints. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.NetworkEndpointGroupsListNetworkEndpoints: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}/listNetworkEndpoints', + 'body': 'network_endpoint_groups_list_endpoints_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListNetworkEndpointsNetworkEndpointGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworkEndpointGroupsListEndpointsRequest.to_json( + compute.NetworkEndpointGroupsListEndpointsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNetworkEndpointsNetworkEndpointGroupsRequest.to_json( + compute.ListNetworkEndpointsNetworkEndpointGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroupsListNetworkEndpoints.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsNetworkEndpointGroupRequest): + The request object. A request message for + NetworkEndpointGroups.TestIamPermissions. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.TestIamPermissionsNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsNetworkEndpointGroupRequest.to_json( + compute.TestIamPermissionsNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListNetworkEndpointGroupsRequest], + compute.NetworkEndpointGroupAggregatedList]: + return self._aggregated_list + @ property + def attach_network_endpoints(self) -> Callable[ + [compute.AttachNetworkEndpointsNetworkEndpointGroupRequest], + compute.Operation]: + return self._attach_network_endpoints + @ property + def delete(self) -> Callable[ + [compute.DeleteNetworkEndpointGroupRequest], + compute.Operation]: + return self._delete + @ property + def detach_network_endpoints(self) -> Callable[ + [compute.DetachNetworkEndpointsNetworkEndpointGroupRequest], + compute.Operation]: + return self._detach_network_endpoints + @ property + def get(self) -> Callable[ + [compute.GetNetworkEndpointGroupRequest], + compute.NetworkEndpointGroup]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertNetworkEndpointGroupRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + 
[compute.ListNetworkEndpointGroupsRequest], + compute.NetworkEndpointGroupList]: + return self._list + @ property + def list_network_endpoints(self) -> Callable[ + [compute.ListNetworkEndpointsNetworkEndpointGroupsRequest], + compute.NetworkEndpointGroupsListNetworkEndpoints]: + return self._list_network_endpoints + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsNetworkEndpointGroupRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'NetworkEndpointGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/__init__.py new file mode 100644 index 000000000..3c9da272f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import NetworksClient + +__all__ = ( + 'NetworksClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/client.py new file mode 100644 index 000000000..620466265 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/client.py @@ -0,0 +1,1317 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.networks import pagers +from google.cloud.compute_v1.types import compute +from 
.transports.base import NetworksTransport, DEFAULT_CLIENT_INFO +from .transports.rest import NetworksRestTransport + + +class NetworksClientMeta(type): + """Metaclass for the Networks client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[NetworksTransport]] + _transport_registry["rest"] = NetworksRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[NetworksTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class NetworksClient(metaclass=NetworksClientMeta): + """The Networks API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NetworksClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NetworksClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> NetworksTransport: + """Returns the transport used by the client instance. + + Returns: + NetworksTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, NetworksTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the networks client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, NetworksTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, NetworksTransport): + # transport is a NetworksTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_peering(self, + request: Union[compute.AddPeeringNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + networks_add_peering_request_resource: compute.NetworksAddPeeringRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds a peering to the specified network. + + Args: + request (Union[google.cloud.compute_v1.types.AddPeeringNetworkRequest, dict]): + The request object. A request message for + Networks.AddPeering. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network resource to add + peering to. + + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ networks_add_peering_request_resource (google.cloud.compute_v1.types.NetworksAddPeeringRequest): + The body resource for this request + This corresponds to the ``networks_add_peering_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network, networks_add_peering_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddPeeringNetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.AddPeeringNetworkRequest): + request = compute.AddPeeringNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + if networks_add_peering_request_resource is not None: + request.networks_add_peering_request_resource = networks_add_peering_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_peering] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified network. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteNetworkRequest, dict]): + The request object. A request message for + Networks.Delete. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network to delete. + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteNetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteNetworkRequest): + request = compute.DeleteNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Network: + r"""Returns the specified network. Gets a list of + available networks by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetNetworkRequest, dict]): + The request object. A request message for Networks.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network to return. + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Network: + Represents a VPC Network resource. + Networks connect resources to each other + and to the internet. For more + information, read Virtual Private Cloud + (VPC) Network. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetNetworkRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetNetworkRequest): + request = compute.GetNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_effective_firewalls(self, + request: Union[compute.GetEffectiveFirewallsNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NetworksGetEffectiveFirewallsResponse: + r"""Returns the effective firewalls on a given network. + + Args: + request (Union[google.cloud.compute_v1.types.GetEffectiveFirewallsNetworkRequest, dict]): + The request object. A request message for + Networks.GetEffectiveFirewalls. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network for this request. + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.NetworksGetEffectiveFirewallsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetEffectiveFirewallsNetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetEffectiveFirewallsNetworkRequest): + request = compute.GetEffectiveFirewallsNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_effective_firewalls] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertNetworkRequest, dict] = None, + *, + project: str = None, + network_resource: compute.Network = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a network in the specified project using the + data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertNetworkRequest, dict]): + The request object. A request message for + Networks.Insert. See the method description for details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_resource (google.cloud.compute_v1.types.Network): + The body resource for this request + This corresponds to the ``network_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertNetworkRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertNetworkRequest): + request = compute.InsertNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network_resource is not None: + request.network_resource = network_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListNetworksRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of networks available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListNetworksRequest, dict]): + The request object. A request message for Networks.List. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.networks.pagers.ListPager: + Contains a list of networks. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNetworksRequest): + request = compute.ListNetworksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_peering_routes(self, + request: Union[compute.ListPeeringRoutesNetworksRequest, dict] = None, + *, + project: str = None, + network: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPeeringRoutesPager: + r"""Lists the peering routes exchanged over peering + connection. + + Args: + request (Union[google.cloud.compute_v1.types.ListPeeringRoutesNetworksRequest, dict]): + The request object. A request message for + Networks.ListPeeringRoutes. See the method description + for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network for this request. + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.networks.pagers.ListPeeringRoutesPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListPeeringRoutesNetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListPeeringRoutesNetworksRequest): + request = compute.ListPeeringRoutesNetworksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_peering_routes] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPeeringRoutesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + network_resource: compute.Network = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified network with the data included + in the request. Only the following fields can be + modified: routingConfig.routingMode. + + Args: + request (Union[google.cloud.compute_v1.types.PatchNetworkRequest, dict]): + The request object. A request message for + Networks.Patch. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network to update. + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_resource (google.cloud.compute_v1.types.Network): + The body resource for this request + This corresponds to the ``network_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network, network_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchNetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchNetworkRequest): + request = compute.PatchNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + if network_resource is not None: + request.network_resource = network_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def remove_peering(self, + request: Union[compute.RemovePeeringNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + networks_remove_peering_request_resource: compute.NetworksRemovePeeringRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Removes a peering from the specified network. + + Args: + request (Union[google.cloud.compute_v1.types.RemovePeeringNetworkRequest, dict]): + The request object. A request message for + Networks.RemovePeering. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network resource to + remove peering from. + + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + networks_remove_peering_request_resource (google.cloud.compute_v1.types.NetworksRemovePeeringRequest): + The body resource for this request + This corresponds to the ``networks_remove_peering_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network, networks_remove_peering_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemovePeeringNetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemovePeeringNetworkRequest): + request = compute.RemovePeeringNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + if networks_remove_peering_request_resource is not None: + request.networks_remove_peering_request_resource = networks_remove_peering_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.remove_peering] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def switch_to_custom_mode(self, + request: Union[compute.SwitchToCustomModeNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Switches the network mode from auto subnet mode to + custom subnet mode. + + Args: + request (Union[google.cloud.compute_v1.types.SwitchToCustomModeNetworkRequest, dict]): + The request object. A request message for + Networks.SwitchToCustomMode. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network to be updated. + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SwitchToCustomModeNetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SwitchToCustomModeNetworkRequest): + request = compute.SwitchToCustomModeNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.switch_to_custom_mode] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_peering(self, + request: Union[compute.UpdatePeeringNetworkRequest, dict] = None, + *, + project: str = None, + network: str = None, + networks_update_peering_request_resource: compute.NetworksUpdatePeeringRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified network peering with the data included in + the request. You can only modify the + NetworkPeering.export_custom_routes field and the + NetworkPeering.import_custom_routes field. + + Args: + request (Union[google.cloud.compute_v1.types.UpdatePeeringNetworkRequest, dict]): + The request object. A request message for + Networks.UpdatePeering. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network (str): + Name of the network resource which + the updated peering is belonging to. + + This corresponds to the ``network`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + networks_update_peering_request_resource (google.cloud.compute_v1.types.NetworksUpdatePeeringRequest): + The body resource for this request + This corresponds to the ``networks_update_peering_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, network, networks_update_peering_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdatePeeringNetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdatePeeringNetworkRequest): + request = compute.UpdatePeeringNetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if network is not None: + request.network = network + if networks_update_peering_request_resource is not None: + request.networks_update_peering_request_resource = networks_update_peering_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.update_peering] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "NetworksClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/pagers.py new file mode 100644 index 000000000..aaab7e831 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/pagers.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NetworkList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NetworkList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NetworkList], + request: compute.ListNetworksRequest, + response: compute.NetworkList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListNetworksRequest): + The initial request object. + response (google.cloud.compute_v1.types.NetworkList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListNetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NetworkList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Network]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPeeringRoutesPager: + """A pager for iterating through ``list_peering_routes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ExchangedPeeringRoutesList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPeeringRoutes`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ExchangedPeeringRoutesList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ExchangedPeeringRoutesList], + request: compute.ListPeeringRoutesNetworksRequest, + response: compute.ExchangedPeeringRoutesList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListPeeringRoutesNetworksRequest): + The initial request object. 
+ response (google.cloud.compute_v1.types.ExchangedPeeringRoutesList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListPeeringRoutesNetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ExchangedPeeringRoutesList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.ExchangedPeeringRoute]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/__init__.py new file mode 100644 index 000000000..fd50f6355 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import NetworksTransport +from .rest import NetworksRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[NetworksTransport]] +_transport_registry['rest'] = NetworksRestTransport + +__all__ = ( + 'NetworksTransport', + 'NetworksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/base.py new file mode 100644 index 000000000..9e58c3fdb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/base.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class NetworksTransport(abc.ABC): + """Abstract transport class for Networks.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.add_peering: gapic_v1.method.wrap_method( + self.add_peering, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_effective_firewalls: gapic_v1.method.wrap_method( + self.get_effective_firewalls, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_peering_routes: gapic_v1.method.wrap_method( + self.list_peering_routes, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.remove_peering: gapic_v1.method.wrap_method( + self.remove_peering, + default_timeout=None, + client_info=client_info, + ), + self.switch_to_custom_mode: gapic_v1.method.wrap_method( + self.switch_to_custom_mode, + default_timeout=None, + client_info=client_info, + ), + self.update_peering: gapic_v1.method.wrap_method( + self.update_peering, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def add_peering(self) -> Callable[ + [compute.AddPeeringNetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteNetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetNetworkRequest], + Union[ + compute.Network, + Awaitable[compute.Network] + ]]: + raise NotImplementedError() + + @property + def get_effective_firewalls(self) -> Callable[ + [compute.GetEffectiveFirewallsNetworkRequest], + Union[ + compute.NetworksGetEffectiveFirewallsResponse, + Awaitable[compute.NetworksGetEffectiveFirewallsResponse] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertNetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListNetworksRequest], + Union[ + compute.NetworkList, + Awaitable[compute.NetworkList] + ]]: + raise NotImplementedError() + + @property + def list_peering_routes(self) -> Callable[ + [compute.ListPeeringRoutesNetworksRequest], + Union[ + compute.ExchangedPeeringRoutesList, + Awaitable[compute.ExchangedPeeringRoutesList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchNetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def remove_peering(self) -> Callable[ + [compute.RemovePeeringNetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def switch_to_custom_mode(self) -> Callable[ + [compute.SwitchToCustomModeNetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + 
@property + def update_peering(self) -> Callable[ + [compute.UpdatePeeringNetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'NetworksTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/rest.py new file mode 100644 index 000000000..9de5430ca --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/networks/transports/rest.py @@ -0,0 +1,1304 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import NetworksTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class NetworksRestTransport(NetworksTransport): + """REST backend transport for Networks. + + The Networks API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_peering(self, + request: compute.AddPeeringNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add peering method over HTTP. + + Args: + request (~.compute.AddPeeringNetworkRequest): + The request object. A request message for + Networks.AddPeering. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}/addPeering', + 'body': 'networks_add_peering_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AddPeeringNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworksAddPeeringRequest.to_json( + compute.NetworksAddPeeringRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddPeeringNetworkRequest.to_json( + compute.AddPeeringNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteNetworkRequest): + The request object. A request message for + Networks.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DeleteNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteNetworkRequest.to_json( + compute.DeleteNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Network: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetNetworkRequest): + The request object. A request message for Networks.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Network: + Represents a VPC Network resource. + Networks connect resources to each other + and to the internet. For more + information, read Virtual Private Cloud + (VPC) Network. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetNetworkRequest.to_json( + compute.GetNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Network.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_effective_firewalls(self, + request: compute.GetEffectiveFirewallsNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworksGetEffectiveFirewallsResponse: + r"""Call the get effective firewalls method over HTTP. + + Args: + request (~.compute.GetEffectiveFirewallsNetworkRequest): + The request object. A request message for + Networks.GetEffectiveFirewalls. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.NetworksGetEffectiveFirewallsResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}/getEffectiveFirewalls', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetEffectiveFirewallsNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetEffectiveFirewallsNetworkRequest.to_json( + compute.GetEffectiveFirewallsNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworksGetEffectiveFirewallsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertNetworkRequest): + The request object. A request message for + Networks.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/networks', + 'body': 'network_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Network.to_json( + compute.Network( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertNetworkRequest.to_json( + compute.InsertNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListNetworksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListNetworksRequest): + The request object. A request message for Networks.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkList: + Contains a list of networks. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/networks', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListNetworksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNetworksRequest.to_json( + compute.ListNetworksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_peering_routes(self, + request: compute.ListPeeringRoutesNetworksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ExchangedPeeringRoutesList: + r"""Call the list peering routes method over HTTP. + + Args: + request (~.compute.ListPeeringRoutesNetworksRequest): + The request object. A request message for + Networks.ListPeeringRoutes. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.ExchangedPeeringRoutesList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}/listPeeringRoutes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListPeeringRoutesNetworksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListPeeringRoutesNetworksRequest.to_json( + compute.ListPeeringRoutesNetworksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ExchangedPeeringRoutesList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchNetworkRequest): + The request object. A request message for Networks.Patch. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}', + 'body': 'network_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.PatchNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Network.to_json( + compute.Network( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchNetworkRequest.to_json( + compute.PatchNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_peering(self, + request: compute.RemovePeeringNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove peering method over HTTP. + + Args: + request (~.compute.RemovePeeringNetworkRequest): + The request object. A request message for + Networks.RemovePeering. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}/removePeering', + 'body': 'networks_remove_peering_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.RemovePeeringNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworksRemovePeeringRequest.to_json( + compute.NetworksRemovePeeringRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemovePeeringNetworkRequest.to_json( + compute.RemovePeeringNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _switch_to_custom_mode(self, + request: compute.SwitchToCustomModeNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the switch to custom mode method over HTTP. + + Args: + request (~.compute.SwitchToCustomModeNetworkRequest): + The request object. A request message for + Networks.SwitchToCustomMode. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}/switchToCustomMode', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.SwitchToCustomModeNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SwitchToCustomModeNetworkRequest.to_json( + compute.SwitchToCustomModeNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update_peering(self, + request: compute.UpdatePeeringNetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update peering method over HTTP. + + Args: + request (~.compute.UpdatePeeringNetworkRequest): + The request object. A request message for + Networks.UpdatePeering. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/networks/{network}/updatePeering', + 'body': 'networks_update_peering_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network", + "network" + ), + ( + "project", + "project" + ), + ] + + request_kwargs = compute.UpdatePeeringNetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworksUpdatePeeringRequest.to_json( + compute.NetworksUpdatePeeringRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdatePeeringNetworkRequest.to_json( + compute.UpdatePeeringNetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_peering(self) -> Callable[ + [compute.AddPeeringNetworkRequest], + compute.Operation]: + return self._add_peering + @ property + def delete(self) -> Callable[ + [compute.DeleteNetworkRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetNetworkRequest], + compute.Network]: + return self._get + @ property + def get_effective_firewalls(self) -> Callable[ + [compute.GetEffectiveFirewallsNetworkRequest], + compute.NetworksGetEffectiveFirewallsResponse]: + return self._get_effective_firewalls + @ property + def insert(self) -> Callable[ + [compute.InsertNetworkRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListNetworksRequest], + compute.NetworkList]: + return self._list + @ property + def list_peering_routes(self) -> Callable[ + [compute.ListPeeringRoutesNetworksRequest], + compute.ExchangedPeeringRoutesList]: + return self._list_peering_routes + @ property + def patch(self) -> Callable[ + [compute.PatchNetworkRequest], + compute.Operation]: + return self._patch + @ property + def remove_peering(self) -> Callable[ + [compute.RemovePeeringNetworkRequest], + compute.Operation]: + return self._remove_peering + @ property + def switch_to_custom_mode(self) -> Callable[ + [compute.SwitchToCustomModeNetworkRequest], + compute.Operation]: + return self._switch_to_custom_mode + @ property + def update_peering(self) -> Callable[ + [compute.UpdatePeeringNetworkRequest], + compute.Operation]: + return self._update_peering + def close(self): + self._session.close() + + +__all__=( + 'NetworksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/__init__.py 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/__init__.py new file mode 100644 index 000000000..bb9b25a6e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import NodeGroupsClient + +__all__ = ( + 'NodeGroupsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/client.py new file mode 100644 index 000000000..0a58ab6c5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/client.py @@ -0,0 +1,1694 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.node_groups import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import NodeGroupsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import NodeGroupsRestTransport + + +class NodeGroupsClientMeta(type): + """Metaclass for the NodeGroups client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[NodeGroupsTransport]] + _transport_registry["rest"] = NodeGroupsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[NodeGroupsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class NodeGroupsClient(metaclass=NodeGroupsClientMeta): + """The NodeGroups API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NodeGroupsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NodeGroupsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> NodeGroupsTransport: + """Returns the transport used by the client instance. + + Returns: + NodeGroupsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" 
+ return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, NodeGroupsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the node groups client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, NodeGroupsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, NodeGroupsTransport): + # transport is a NodeGroupsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_nodes(self, + request: Union[compute.AddNodesNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_group: str = None, + node_groups_add_nodes_request_resource: compute.NodeGroupsAddNodesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds specified number of nodes to the node group. + + Args: + request (Union[google.cloud.compute_v1.types.AddNodesNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.AddNodes. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (str): + Name of the NodeGroup resource. + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_groups_add_nodes_request_resource (google.cloud.compute_v1.types.NodeGroupsAddNodesRequest): + The body resource for this request + This corresponds to the ``node_groups_add_nodes_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, node_group, node_groups_add_nodes_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddNodesNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddNodesNodeGroupRequest): + request = compute.AddNodesNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_group is not None: + request.node_group = node_group + if node_groups_add_nodes_request_resource is not None: + request.node_groups_add_nodes_request_resource = node_groups_add_nodes_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_nodes] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def aggregated_list(self, + request: Union[compute.AggregatedListNodeGroupsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of node groups. Note: + use nodeGroups.listNodes for more details about each + group. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListNodeGroupsRequest, dict]): + The request object. A request message for + NodeGroups.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.node_groups.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListNodeGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListNodeGroupsRequest): + request = compute.AggregatedListNodeGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified NodeGroup resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (str): + Name of the NodeGroup resource to + delete. + + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, node_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteNodeGroupRequest): + request = compute.DeleteNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_group is not None: + request.node_group = node_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_nodes(self, + request: Union[compute.DeleteNodesNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_group: str = None, + node_groups_delete_nodes_request_resource: compute.NodeGroupsDeleteNodesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes specified nodes from the node group. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteNodesNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.DeleteNodes. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (str): + Name of the NodeGroup resource whose + nodes will be deleted. + + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_groups_delete_nodes_request_resource (google.cloud.compute_v1.types.NodeGroupsDeleteNodesRequest): + The body resource for this request + This corresponds to the ``node_groups_delete_nodes_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, node_group, node_groups_delete_nodes_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteNodesNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteNodesNodeGroupRequest): + request = compute.DeleteNodesNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_group is not None: + request.node_group = node_group + if node_groups_delete_nodes_request_resource is not None: + request.node_groups_delete_nodes_request_resource = node_groups_delete_nodes_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_nodes] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NodeGroup: + r"""Returns the specified NodeGroup. Get a list of + available NodeGroups by making a list() request. Note: + the "nodes" field should not be used. Use + nodeGroups.listNodes instead. + + Args: + request (Union[google.cloud.compute_v1.types.GetNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.Get. 
See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (str): + Name of the node group to return. + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.NodeGroup: + Represents a sole-tenant Node Group + resource. A sole-tenant node is a + physical server that is dedicated to + hosting VM instances only for your + specific project. Use sole-tenant nodes + to keep your instances physically + separated from instances in other + projects, or to group your instances + together on the same host hardware. For + more information, read Sole-tenant + nodes. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, node_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetNodeGroupRequest): + request = compute.GetNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_group is not None: + request.node_group = node_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.GetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicyNodeGroupRequest): + request = compute.GetIamPolicyNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + initial_node_count: int = None, + node_group_resource: compute.NodeGroup = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a NodeGroup resource in the specified project + using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + initial_node_count (int): + Initial count of nodes in the node + group. + + This corresponds to the ``initial_node_count`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ node_group_resource (google.cloud.compute_v1.types.NodeGroup): + The body resource for this request + This corresponds to the ``node_group_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, initial_node_count, node_group_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertNodeGroupRequest): + request = compute.InsertNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if initial_node_count is not None: + request.initial_node_count = initial_node_count + if node_group_resource is not None: + request.node_group_resource = node_group_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListNodeGroupsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of node groups available to the + specified project. Note: use nodeGroups.listNodes for + more details about each group. + + Args: + request (Union[google.cloud.compute_v1.types.ListNodeGroupsRequest, dict]): + The request object. A request message for + NodeGroups.List. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.node_groups.pagers.ListPager: + Contains a list of nodeGroups. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNodeGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNodeGroupsRequest): + request = compute.ListNodeGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_nodes(self, + request: Union[compute.ListNodesNodeGroupsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListNodesPager: + r"""Lists nodes in the node group. + + Args: + request (Union[google.cloud.compute_v1.types.ListNodesNodeGroupsRequest, dict]): + The request object. A request message for + NodeGroups.ListNodes. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (str): + Name of the NodeGroup resource whose + nodes you want to list. + + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.node_groups.pagers.ListNodesPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, node_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNodesNodeGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNodesNodeGroupsRequest): + request = compute.ListNodesNodeGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_group is not None: + request.node_group = node_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_nodes] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListNodesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_group: str = None, + node_group_resource: compute.NodeGroup = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified node group. + + Args: + request (Union[google.cloud.compute_v1.types.PatchNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.Patch. See the method description for + details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (str): + Name of the NodeGroup resource to + update. + + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group_resource (google.cloud.compute_v1.types.NodeGroup): + The body resource for this request + This corresponds to the ``node_group_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, node_group, node_group_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchNodeGroupRequest): + request = compute.PatchNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_group is not None: + request.node_group = node_group + if node_group_resource is not None: + request.node_group_resource = node_group_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + zone_set_policy_request_resource: compute.ZoneSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyNodeGroupRequest, dict]): + The request object. 
A request message for + NodeGroups.SetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + This corresponds to the ``zone_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. 
To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, zone_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyNodeGroupRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetIamPolicyNodeGroupRequest): + request = compute.SetIamPolicyNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if zone_set_policy_request_resource is not None: + request.zone_set_policy_request_resource = zone_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_node_template(self, + request: Union[compute.SetNodeTemplateNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_group: str = None, + node_groups_set_node_template_request_resource: compute.NodeGroupsSetNodeTemplateRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the node template of the node group. + + Args: + request (Union[google.cloud.compute_v1.types.SetNodeTemplateNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.SetNodeTemplate. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ node_group (str): + Name of the NodeGroup resource to + update. + + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_groups_set_node_template_request_resource (google.cloud.compute_v1.types.NodeGroupsSetNodeTemplateRequest): + The body resource for this request + This corresponds to the ``node_groups_set_node_template_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, node_group, node_groups_set_node_template_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetNodeTemplateNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetNodeTemplateNodeGroupRequest): + request = compute.SetNodeTemplateNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_group is not None: + request.node_group = node_group + if node_groups_set_node_template_request_resource is not None: + request.node_groups_set_node_template_request_resource = node_groups_set_node_template_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_node_template] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsNodeGroupRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. 
+ + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsNodeGroupRequest, dict]): + The request object. A request message for + NodeGroups.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsNodeGroupRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsNodeGroupRequest): + request = compute.TestIamPermissionsNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "NodeGroupsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/pagers.py new file mode 100644 index 000000000..cbf3b9b86 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/pagers.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NodeGroupAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.NodeGroupAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NodeGroupAggregatedList], + request: compute.AggregatedListNodeGroupsRequest, + response: compute.NodeGroupAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListNodeGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NodeGroupAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListNodeGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NodeGroupAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.NodeGroupsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.NodeGroupsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NodeGroupList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NodeGroupList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NodeGroupList], + request: compute.ListNodeGroupsRequest, + response: compute.NodeGroupList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListNodeGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NodeGroupList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListNodeGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NodeGroupList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NodeGroup]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListNodesPager: + """A pager for iterating through ``list_nodes`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NodeGroupsListNodes` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListNodes`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NodeGroupsListNodes` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NodeGroupsListNodes], + request: compute.ListNodesNodeGroupsRequest, + response: compute.NodeGroupsListNodes, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListNodesNodeGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NodeGroupsListNodes): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListNodesNodeGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NodeGroupsListNodes]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NodeGroupNode]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/__init__.py new file mode 100644 index 000000000..ae2a79e09 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import NodeGroupsTransport +from .rest import NodeGroupsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[NodeGroupsTransport]] +_transport_registry['rest'] = NodeGroupsRestTransport + +__all__ = ( + 'NodeGroupsTransport', + 'NodeGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/base.py new file mode 100644 index 000000000..d35866ce8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/base.py @@ -0,0 +1,315 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class NodeGroupsTransport(abc.ABC): + """Abstract transport class for NodeGroups.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.add_nodes: gapic_v1.method.wrap_method( + self.add_nodes, + default_timeout=None, + client_info=client_info, + ), + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.delete_nodes: gapic_v1.method.wrap_method( + self.delete_nodes, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_nodes: gapic_v1.method.wrap_method( + self.list_nodes, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_node_template: gapic_v1.method.wrap_method( + self.set_node_template, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def add_nodes(self) -> Callable[ + [compute.AddNodesNodeGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListNodeGroupsRequest], + Union[ + compute.NodeGroupAggregatedList, + Awaitable[compute.NodeGroupAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteNodeGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_nodes(self) -> Callable[ + [compute.DeleteNodesNodeGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetNodeGroupRequest], + Union[ + compute.NodeGroup, + Awaitable[compute.NodeGroup] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyNodeGroupRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertNodeGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListNodeGroupsRequest], + Union[ + compute.NodeGroupList, + Awaitable[compute.NodeGroupList] + ]]: + raise NotImplementedError() + + @property + def list_nodes(self) -> Callable[ + [compute.ListNodesNodeGroupsRequest], + Union[ + compute.NodeGroupsListNodes, + Awaitable[compute.NodeGroupsListNodes] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchNodeGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + 
[compute.SetIamPolicyNodeGroupRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def set_node_template(self) -> Callable[ + [compute.SetNodeTemplateNodeGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsNodeGroupRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'NodeGroupsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/rest.py new file mode 100644 index 000000000..e9b52becf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_groups/transports/rest.py @@ -0,0 +1,1631 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import NodeGroupsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class NodeGroupsRestTransport(NodeGroupsTransport): + """REST backend transport for NodeGroups. + + The NodeGroups API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_nodes(self, + request: compute.AddNodesNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add nodes method over HTTP. 
+
+        Args:
+            request (~.compute.AddNodesNodeGroupRequest):
+                The request object. A request message for
+                NodeGroups.AddNodes. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                -  For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/addNodes', + 'body': 'node_groups_add_nodes_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_group", + "nodeGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.AddNodesNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NodeGroupsAddNodesRequest.to_json( + compute.NodeGroupsAddNodesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddNodesNodeGroupRequest.to_json( + compute.AddNodesNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _aggregated_list(self, + request: compute.AggregatedListNodeGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeGroupAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListNodeGroupsRequest): + The request object. A request message for + NodeGroups.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NodeGroupAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/nodeGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListNodeGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListNodeGroupsRequest.to_json( + compute.AggregatedListNodeGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeGroupAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteNodeGroupRequest): + The request object. A request message for + NodeGroups.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_group", + "nodeGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteNodeGroupRequest.to_json( + compute.DeleteNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete_nodes(self, + request: compute.DeleteNodesNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete nodes method over HTTP. + + Args: + request (~.compute.DeleteNodesNodeGroupRequest): + The request object. A request message for + NodeGroups.DeleteNodes. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/deleteNodes', + 'body': 'node_groups_delete_nodes_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_group", + "nodeGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteNodesNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NodeGroupsDeleteNodesRequest.to_json( + compute.NodeGroupsDeleteNodesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteNodesNodeGroupRequest.to_json( + compute.DeleteNodesNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeGroup: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetNodeGroupRequest): + The request object. A request message for NodeGroups.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NodeGroup: + Represents a sole-tenant Node Group + resource. A sole-tenant node is a + physical server that is dedicated to + hosting VM instances only for your + specific project. Use sole-tenant nodes + to keep your instances physically + separated from instances in other + projects, or to group your instances + together on the same host hardware. For + more information, read Sole-tenant + nodes. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_group", + "nodeGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetNodeGroupRequest.to_json( + compute.GetNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeGroup.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyNodeGroupRequest): + The request object. A request message for + NodeGroups.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetIamPolicyNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyNodeGroupRequest.to_json( + compute.GetIamPolicyNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertNodeGroupRequest): + The request object. A request message for + NodeGroups.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups', + 'body': 'node_group_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "initial_node_count", + "initialNodeCount" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NodeGroup.to_json( + compute.NodeGroup( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertNodeGroupRequest.to_json( + compute.InsertNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListNodeGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeGroupList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListNodeGroupsRequest): + The request object. A request message for + NodeGroups.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NodeGroupList: + Contains a list of nodeGroups. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListNodeGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNodeGroupsRequest.to_json( + compute.ListNodeGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeGroupList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_nodes(self, + request: compute.ListNodesNodeGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeGroupsListNodes: + r"""Call the list nodes method over HTTP. + + Args: + request (~.compute.ListNodesNodeGroupsRequest): + The request object. A request message for + NodeGroups.ListNodes. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.NodeGroupsListNodes: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/listNodes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_group", + "nodeGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListNodesNodeGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNodesNodeGroupsRequest.to_json( + compute.ListNodesNodeGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeGroupsListNodes.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchNodeGroupRequest): + The request object. A request message for + NodeGroups.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}', + 'body': 'node_group_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_group", + "nodeGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.PatchNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NodeGroup.to_json( + compute.NodeGroup( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchNodeGroupRequest.to_json( + compute.PatchNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyNodeGroupRequest): + The request object. A request message for + NodeGroups.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy', + 'body': 'zone_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetIamPolicyNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ZoneSetPolicyRequest.to_json( + compute.ZoneSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyNodeGroupRequest.to_json( + compute.SetIamPolicyNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_node_template(self, + request: compute.SetNodeTemplateNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set node template method over HTTP. + + Args: + request (~.compute.SetNodeTemplateNodeGroupRequest): + The request object. A request message for + NodeGroups.SetNodeTemplate. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/setNodeTemplate', + 'body': 'node_groups_set_node_template_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_group", + "nodeGroup" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetNodeTemplateNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NodeGroupsSetNodeTemplateRequest.to_json( + compute.NodeGroupsSetNodeTemplateRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetNodeTemplateNodeGroupRequest.to_json( + compute.SetNodeTemplateNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsNodeGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsNodeGroupRequest): + The request object. A request message for + NodeGroups.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.TestIamPermissionsNodeGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsNodeGroupRequest.to_json( + compute.TestIamPermissionsNodeGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_nodes(self) -> Callable[ + [compute.AddNodesNodeGroupRequest], + compute.Operation]: + return self._add_nodes + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListNodeGroupsRequest], + compute.NodeGroupAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteNodeGroupRequest], + compute.Operation]: + return self._delete + @ property + def delete_nodes(self) -> Callable[ + [compute.DeleteNodesNodeGroupRequest], + compute.Operation]: + return self._delete_nodes + @ property + def get(self) -> Callable[ + [compute.GetNodeGroupRequest], + compute.NodeGroup]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyNodeGroupRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertNodeGroupRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListNodeGroupsRequest], 
+ compute.NodeGroupList]: + return self._list + @ property + def list_nodes(self) -> Callable[ + [compute.ListNodesNodeGroupsRequest], + compute.NodeGroupsListNodes]: + return self._list_nodes + @ property + def patch(self) -> Callable[ + [compute.PatchNodeGroupRequest], + compute.Operation]: + return self._patch + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyNodeGroupRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def set_node_template(self) -> Callable[ + [compute.SetNodeTemplateNodeGroupRequest], + compute.Operation]: + return self._set_node_template + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsNodeGroupRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'NodeGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/__init__.py new file mode 100644 index 000000000..d7038481b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import NodeTemplatesClient + +__all__ = ( + 'NodeTemplatesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/client.py new file mode 100644 index 000000000..fd3638029 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/client.py @@ -0,0 +1,1153 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.node_templates import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import NodeTemplatesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import NodeTemplatesRestTransport + + +class NodeTemplatesClientMeta(type): + """Metaclass for the NodeTemplates client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[NodeTemplatesTransport]] + _transport_registry["rest"] = NodeTemplatesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[NodeTemplatesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class NodeTemplatesClient(metaclass=NodeTemplatesClientMeta):
+    """The NodeTemplates API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NodeTemplatesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NodeTemplatesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> NodeTemplatesTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            NodeTemplatesTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, NodeTemplatesTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the node templates client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, NodeTemplatesTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, NodeTemplatesTransport): + # transport is a NodeTemplatesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListNodeTemplatesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of node templates. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListNodeTemplatesRequest, dict]): + The request object. A request message for + NodeTemplates.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.node_templates.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListNodeTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListNodeTemplatesRequest): + request = compute.AggregatedListNodeTemplatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteNodeTemplateRequest, dict] = None, + *, + project: str = None, + region: str = None, + node_template: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified NodeTemplate resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteNodeTemplateRequest, dict]): + The request object. A request message for + NodeTemplates.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_template (str): + Name of the NodeTemplate resource to + delete. + + This corresponds to the ``node_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, node_template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteNodeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteNodeTemplateRequest): + request = compute.DeleteNodeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if node_template is not None: + request.node_template = node_template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetNodeTemplateRequest, dict] = None, + *, + project: str = None, + region: str = None, + node_template: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NodeTemplate: + r"""Returns the specified node template. Gets a list of + available node templates by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetNodeTemplateRequest, dict]): + The request object. A request message for + NodeTemplates.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_template (str): + Name of the node template to return. + This corresponds to the ``node_template`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.NodeTemplate: + Represent a sole-tenant Node Template + resource. You can use a template to + define properties for nodes in a node + group. For more information, read + Creating node groups and instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, node_template]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetNodeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetNodeTemplateRequest): + request = compute.GetNodeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if node_template is not None: + request.node_template = node_template + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyNodeTemplateRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyNodeTemplateRequest, dict]): + The request object. A request message for + NodeTemplates.GetIamPolicy. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. 
Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyNodeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetIamPolicyNodeTemplateRequest): + request = compute.GetIamPolicyNodeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertNodeTemplateRequest, dict] = None, + *, + project: str = None, + region: str = None, + node_template_resource: compute.NodeTemplate = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a NodeTemplate resource in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertNodeTemplateRequest, dict]): + The request object. A request message for + NodeTemplates.Insert. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_template_resource (google.cloud.compute_v1.types.NodeTemplate): + The body resource for this request + This corresponds to the ``node_template_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, node_template_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertNodeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertNodeTemplateRequest): + request = compute.InsertNodeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if node_template_resource is not None: + request.node_template_resource = node_template_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListNodeTemplatesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of node templates available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListNodeTemplatesRequest, dict]): + The request object. A request message for + NodeTemplates.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.node_templates.pagers.ListPager: + Contains a list of node templates. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNodeTemplatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNodeTemplatesRequest): + request = compute.ListNodeTemplatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyNodeTemplateRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_policy_request_resource: compute.RegionSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyNodeTemplateRequest, dict]): + The request object. A request message for + NodeTemplates.SetIamPolicy. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + This corresponds to the ``region_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 
3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, region_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyNodeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetIamPolicyNodeTemplateRequest): + request = compute.SetIamPolicyNodeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_policy_request_resource is not None: + request.region_set_policy_request_resource = region_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsNodeTemplateRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsNodeTemplateRequest, dict]): + The request object. A request message for + NodeTemplates.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsNodeTemplateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsNodeTemplateRequest): + request = compute.TestIamPermissionsNodeTemplateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "NodeTemplatesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/pagers.py new file mode 100644 index 000000000..8537aa880 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NodeTemplateAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.NodeTemplateAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NodeTemplateAggregatedList], + request: compute.AggregatedListNodeTemplatesRequest, + response: compute.NodeTemplateAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListNodeTemplatesRequest): + The initial request object. + response (google.cloud.compute_v1.types.NodeTemplateAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListNodeTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NodeTemplateAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.NodeTemplatesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.NodeTemplatesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NodeTemplateList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NodeTemplateList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NodeTemplateList], + request: compute.ListNodeTemplatesRequest, + response: compute.NodeTemplateList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListNodeTemplatesRequest): + The initial request object. + response (google.cloud.compute_v1.types.NodeTemplateList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListNodeTemplatesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NodeTemplateList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NodeTemplate]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/__init__.py new file mode 100644 index 000000000..a735a89cf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import NodeTemplatesTransport +from .rest import NodeTemplatesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[NodeTemplatesTransport]]
+# Only a REST transport is registered here; no gRPC entry exists in this module.
+_transport_registry['rest'] = NodeTemplatesRestTransport
+
+__all__ = (
+    'NodeTemplatesTransport',
+    'NodeTemplatesRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/base.py
new file mode 100644
index 000000000..df0e4ce24
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/base.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import abc
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import pkg_resources
+
+import google.auth  # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.compute_v1.types import compute
+
+# Report the installed google-cloud-compute version in the user-agent; fall
+# back to an empty ClientInfo when the package was not pip-installed.
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            'google-cloud-compute',
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class NodeTemplatesTransport(abc.ABC):
+    """Abstract transport class for NodeTemplates."""
+
+    # OAuth 2.0 scopes requested when default credentials are resolved.
+    AUTH_SCOPES = (
+        'https://www.googleapis.com/auth/compute',
+        'https://www.googleapis.com/auth/cloud-platform',
+    )
+
+    # Default API endpoint; ``__init__`` appends port 443 when none is given.
+    DEFAULT_HOST: str = 'compute.googleapis.com'
+    def __init__(
+            self, *,
+            host: str = DEFAULT_HOST,
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+
+        Raises:
+            google.api_core.exceptions.DuplicateCredentialArgs: If both
+                ``credentials`` and ``credentials_file`` are passed.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        # ``default_scopes`` lets google-auth fall back to AUTH_SCOPES when the
+        # caller supplied no explicit scopes.
+        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            # Application Default Credentials as a last resort.
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        # The hasattr guard tolerates google-auth versions that may lack
+        # with_always_use_jwt_access — TODO confirm minimum supported version.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    def _prep_wrapped_messages(self, client_info):
+        """Wrap every RPC callable with gapic_v1.method.wrap_method (adds
+        retry/timeout handling and the user-agent from ``client_info``)."""
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.aggregated_list: gapic_v1.method.wrap_method(
+                self.aggregated_list,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.delete: gapic_v1.method.wrap_method(
+                self.delete,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.get: gapic_v1.method.wrap_method(
+                self.get,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.get_iam_policy: gapic_v1.method.wrap_method(
+                self.get_iam_policy,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.insert: gapic_v1.method.wrap_method(
+                self.insert,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.list: gapic_v1.method.wrap_method(
+                self.list,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.set_iam_policy: gapic_v1.method.wrap_method(
+                self.set_iam_policy,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.test_iam_permissions: gapic_v1.method.wrap_method(
+                self.test_iam_permissions,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+        }
+
+    def close(self):
+        """Closes resources associated with the transport.
+
+       .. warning::
+            Only call this method if the transport is NOT shared
+            with other clients - this may cause errors in other clients!
+        """
+        raise NotImplementedError()
+
+    # Each RPC below is exposed as a property returning the transport's
+    # callable; concrete subclasses must override every one of them.
+    @property
+    def aggregated_list(self) -> Callable[
+            [compute.AggregatedListNodeTemplatesRequest],
+            Union[
+                compute.NodeTemplateAggregatedList,
+                Awaitable[compute.NodeTemplateAggregatedList]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def delete(self) -> Callable[
+            [compute.DeleteNodeTemplateRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def get(self) -> Callable[
+            [compute.GetNodeTemplateRequest],
+            Union[
+                compute.NodeTemplate,
+                Awaitable[compute.NodeTemplate]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def get_iam_policy(self) -> Callable[
+            [compute.GetIamPolicyNodeTemplateRequest],
+            Union[
+                compute.Policy,
+                Awaitable[compute.Policy]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def insert(self) -> Callable[
+            [compute.InsertNodeTemplateRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def list(self) -> Callable[
+            [compute.ListNodeTemplatesRequest],
+            Union[
+                compute.NodeTemplateList,
+                Awaitable[compute.NodeTemplateList]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def set_iam_policy(self) -> Callable[
+            [compute.SetIamPolicyNodeTemplateRequest],
+            Union[
+                compute.Policy,
+                Awaitable[compute.Policy]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def test_iam_permissions(self) -> Callable[
+            [compute.TestIamPermissionsNodeTemplateRequest],
+            Union[
+                compute.TestPermissionsResponse,
+                Awaitable[compute.TestPermissionsResponse]
+            ]]:
+        raise NotImplementedError()
+
+
+__all__ = (
+    'NodeTemplatesTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/rest.py
new file mode 100644
index 000000000..11e1829fe
--- /dev/null
+++
b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_templates/transports/rest.py @@ -0,0 +1,1036 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import NodeTemplatesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class NodeTemplatesRestTransport(NodeTemplatesTransport): + """REST backend transport for NodeTemplates. 
+ + The NodeTemplates API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListNodeTemplatesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeTemplateAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListNodeTemplatesRequest): + The request object. A request message for + NodeTemplates.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.NodeTemplateAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/nodeTemplates', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListNodeTemplatesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListNodeTemplatesRequest.to_json( + compute.AggregatedListNodeTemplatesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeTemplateAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteNodeTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteNodeTemplateRequest): + The request object. A request message for + NodeTemplates.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{node_template}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_template", + "nodeTemplate" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteNodeTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteNodeTemplateRequest.to_json( + compute.DeleteNodeTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetNodeTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeTemplate: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetNodeTemplateRequest): + The request object. A request message for + NodeTemplates.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NodeTemplate: + Represent a sole-tenant Node Template + resource. You can use a template to + define properties for nodes in a node + group. For more information, read + Creating node groups and instances. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{node_template}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_template", + "nodeTemplate" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetNodeTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetNodeTemplateRequest.to_json( + compute.GetNodeTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeTemplate.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyNodeTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyNodeTemplateRequest): + The request object. A request message for + NodeTemplates.GetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyNodeTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyNodeTemplateRequest.to_json( + compute.GetIamPolicyNodeTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertNodeTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertNodeTemplateRequest): + The request object. A request message for + NodeTemplates.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/nodeTemplates', + 'body': 'node_template_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertNodeTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NodeTemplate.to_json( + compute.NodeTemplate( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertNodeTemplateRequest.to_json( + compute.InsertNodeTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListNodeTemplatesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeTemplateList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListNodeTemplatesRequest): + The request object. A request message for + NodeTemplates.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NodeTemplateList: + Contains a list of node templates. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/nodeTemplates', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListNodeTemplatesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNodeTemplatesRequest.to_json( + compute.ListNodeTemplatesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeTemplateList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyNodeTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyNodeTemplateRequest): + The request object. A request message for + NodeTemplates.SetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy', + 'body': 'region_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyNodeTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetPolicyRequest.to_json( + compute.RegionSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyNodeTemplateRequest.to_json( + compute.SetIamPolicyNodeTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsNodeTemplateRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsNodeTemplateRequest): + The request object. A request message for + NodeTemplates.TestIamPermissions. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsNodeTemplateRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsNodeTemplateRequest.to_json( + 
compute.TestIamPermissionsNodeTemplateRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListNodeTemplatesRequest], + compute.NodeTemplateAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteNodeTemplateRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetNodeTemplateRequest], + compute.NodeTemplate]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyNodeTemplateRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertNodeTemplateRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListNodeTemplatesRequest], + compute.NodeTemplateList]: + return self._list + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyNodeTemplateRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsNodeTemplateRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'NodeTemplatesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/__init__.py new file mode 100644 index 000000000..ba4649b21 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import NodeTypesClient + +__all__ = ( + 'NodeTypesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/client.py new file mode 100644 index 000000000..0d2fd38d3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/client.py @@ -0,0 +1,610 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.node_types import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import NodeTypesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import NodeTypesRestTransport + + +class NodeTypesClientMeta(type): + """Metaclass for the NodeTypes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[NodeTypesTransport]] + _transport_registry["rest"] = NodeTypesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[NodeTypesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class NodeTypesClient(metaclass=NodeTypesClientMeta): + """The NodeTypes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NodeTypesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NodeTypesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> NodeTypesTransport: + """Returns the transport used by the client instance. + + Returns: + NodeTypesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + 
return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, NodeTypesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the node types client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, NodeTypesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, NodeTypesTransport): + # transport is a NodeTypesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListNodeTypesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of node types. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListNodeTypesRequest, dict]): + The request object. A request message for + NodeTypes.AggregatedList. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.node_types.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListNodeTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListNodeTypesRequest): + request = compute.AggregatedListNodeTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetNodeTypeRequest, dict] = None, + *, + project: str = None, + zone: str = None, + node_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NodeType: + r"""Returns the specified node type. Gets a list of + available node types by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetNodeTypeRequest, dict]): + The request object. A request message for NodeTypes.Get. + See the method description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_type (str): + Name of the node type to return. + This corresponds to the ``node_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.NodeType: + Represent a sole-tenant Node Type + resource. Each node within a node group + must have a node type. A node type + specifies the total amount of cores and + memory for that node. Currently, the + only available node type is + n1-node-96-624 node type that has 96 + vCPUs and 624 GB of memory, available in + multiple zones. For more information + read Node types. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, node_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetNodeTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetNodeTypeRequest): + request = compute.GetNodeTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if node_type is not None: + request.node_type = node_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListNodeTypesRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of node types available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListNodeTypesRequest, dict]): + The request object. A request message for + NodeTypes.List. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.node_types.pagers.ListPager: + Contains a list of node types. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListNodeTypesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListNodeTypesRequest): + request = compute.ListNodeTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "NodeTypesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/pagers.py new file mode 100644 index 000000000..8eb57c25c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NodeTypeAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.NodeTypeAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NodeTypeAggregatedList], + request: compute.AggregatedListNodeTypesRequest, + response: compute.NodeTypeAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListNodeTypesRequest): + The initial request object. + response (google.cloud.compute_v1.types.NodeTypeAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListNodeTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NodeTypeAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.NodeTypesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.NodeTypesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NodeTypeList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NodeTypeList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NodeTypeList], + request: compute.ListNodeTypesRequest, + response: compute.NodeTypeList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListNodeTypesRequest): + The initial request object. + response (google.cloud.compute_v1.types.NodeTypeList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListNodeTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NodeTypeList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NodeType]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/__init__.py new file mode 100644 index 000000000..3ad6de1a8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import NodeTypesTransport +from .rest import NodeTypesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[NodeTypesTransport]] +_transport_registry['rest'] = NodeTypesRestTransport + +__all__ = ( + 'NodeTypesTransport', + 'NodeTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/base.py new file mode 100644 index 000000000..c86dbb5df --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/base.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class NodeTypesTransport(abc.ABC): + """Abstract transport class for NodeTypes.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+ This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListNodeTypesRequest], + Union[ + compute.NodeTypeAggregatedList, + Awaitable[compute.NodeTypeAggregatedList] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetNodeTypeRequest], + Union[ + compute.NodeType, + Awaitable[compute.NodeType] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListNodeTypesRequest], + Union[ + compute.NodeTypeList, + Awaitable[compute.NodeTypeList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'NodeTypesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/rest.py new file mode 100644 index 000000000..eb19b2d3e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/node_types/transports/rest.py @@ -0,0 +1,416 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core 
import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import NodeTypesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class NodeTypesRestTransport(NodeTypesTransport): + """REST backend transport for NodeTypes. + + The NodeTypes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListNodeTypesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeTypeAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListNodeTypesRequest): + The request object. A request message for + NodeTypes.AggregatedList. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.NodeTypeAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/nodeTypes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListNodeTypesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListNodeTypesRequest.to_json( + compute.AggregatedListNodeTypesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeTypeAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetNodeTypeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeType: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetNodeTypeRequest): + The request object. A request message for NodeTypes.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NodeType: + Represent a sole-tenant Node Type + resource. Each node within a node group + must have a node type. A node type + specifies the total amount of cores and + memory for that node. Currently, the + only available node type is + n1-node-96-624 node type that has 96 + vCPUs and 624 GB of memory, available in + multiple zones. For more information + read Node types. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeTypes/{node_type}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "node_type", + "nodeType" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetNodeTypeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetNodeTypeRequest.to_json( + compute.GetNodeTypeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeType.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListNodeTypesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NodeTypeList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListNodeTypesRequest): + The request object. A request message for NodeTypes.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NodeTypeList: + Contains a list of node types. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/nodeTypes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListNodeTypesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListNodeTypesRequest.to_json( + compute.ListNodeTypesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NodeTypeList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListNodeTypesRequest], + compute.NodeTypeAggregatedList]: + return self._aggregated_list + @ property + def get(self) -> Callable[ + [compute.GetNodeTypeRequest], + compute.NodeType]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListNodeTypesRequest], + compute.NodeTypeList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'NodeTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/__init__.py new file mode 100644 index 000000000..56c7e3417 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PacketMirroringsClient + +__all__ = ( + 'PacketMirroringsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/client.py new file mode 100644 index 000000000..6ecae6a29 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/client.py @@ -0,0 +1,1006 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.packet_mirrorings import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import PacketMirroringsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import PacketMirroringsRestTransport + + +class PacketMirroringsClientMeta(type): + """Metaclass for the PacketMirrorings client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PacketMirroringsTransport]] + _transport_registry["rest"] = PacketMirroringsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PacketMirroringsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class PacketMirroringsClient(metaclass=PacketMirroringsClientMeta):
+    """The PacketMirrorings API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            PacketMirroringsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PacketMirroringsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PacketMirroringsTransport: + """Returns the transport used by the client instance. + + Returns: + PacketMirroringsTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def 
parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, PacketMirroringsTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the packet mirrorings client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, PacketMirroringsTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PacketMirroringsTransport): + # transport is a PacketMirroringsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListPacketMirroringsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of packetMirrorings. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListPacketMirroringsRequest, dict]): + The request object. A request message for + PacketMirrorings.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.packet_mirrorings.pagers.AggregatedListPager: + Contains a list of packetMirrorings. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListPacketMirroringsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListPacketMirroringsRequest): + request = compute.AggregatedListPacketMirroringsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeletePacketMirroringRequest, dict] = None, + *, + project: str = None, + region: str = None, + packet_mirroring: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified PacketMirroring resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeletePacketMirroringRequest, dict]): + The request object. A request message for + PacketMirrorings.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + packet_mirroring (str): + Name of the PacketMirroring resource + to delete. + + This corresponds to the ``packet_mirroring`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, packet_mirroring]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeletePacketMirroringRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeletePacketMirroringRequest): + request = compute.DeletePacketMirroringRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if packet_mirroring is not None: + request.packet_mirroring = packet_mirroring + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetPacketMirroringRequest, dict] = None, + *, + project: str = None, + region: str = None, + packet_mirroring: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.PacketMirroring: + r"""Returns the specified PacketMirroring resource. 
+ + Args: + request (Union[google.cloud.compute_v1.types.GetPacketMirroringRequest, dict]): + The request object. A request message for + PacketMirrorings.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + packet_mirroring (str): + Name of the PacketMirroring resource + to return. + + This corresponds to the ``packet_mirroring`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.PacketMirroring: + Represents a Packet Mirroring + resource. Packet Mirroring clones the + traffic of specified instances in your + Virtual Private Cloud (VPC) network and + forwards it to a collector destination, + such as an instance group of an internal + TCP/UDP load balancer, for analysis or + examination. For more information about + setting up Packet Mirroring, see Using + Packet Mirroring. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, packet_mirroring]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetPacketMirroringRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetPacketMirroringRequest): + request = compute.GetPacketMirroringRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if packet_mirroring is not None: + request.packet_mirroring = packet_mirroring + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertPacketMirroringRequest, dict] = None, + *, + project: str = None, + region: str = None, + packet_mirroring_resource: compute.PacketMirroring = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a PacketMirroring resource in the specified + project and region using the data included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertPacketMirroringRequest, dict]): + The request object. A request message for + PacketMirrorings.Insert. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + packet_mirroring_resource (google.cloud.compute_v1.types.PacketMirroring): + The body resource for this request + This corresponds to the ``packet_mirroring_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, packet_mirroring_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertPacketMirroringRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertPacketMirroringRequest): + request = compute.InsertPacketMirroringRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if packet_mirroring_resource is not None: + request.packet_mirroring_resource = packet_mirroring_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListPacketMirroringsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of PacketMirroring resources + available to the specified project and region. + + Args: + request (Union[google.cloud.compute_v1.types.ListPacketMirroringsRequest, dict]): + The request object. A request message for + PacketMirrorings.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.packet_mirrorings.pagers.ListPager: + Contains a list of PacketMirroring + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListPacketMirroringsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListPacketMirroringsRequest): + request = compute.ListPacketMirroringsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchPacketMirroringRequest, dict] = None, + *, + project: str = None, + region: str = None, + packet_mirroring: str = None, + packet_mirroring_resource: compute.PacketMirroring = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified PacketMirroring resource with + the data included in the request. This method supports + PATCH semantics and uses JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchPacketMirroringRequest, dict]): + The request object. A request message for + PacketMirrorings.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + packet_mirroring (str): + Name of the PacketMirroring resource + to patch. + + This corresponds to the ``packet_mirroring`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + packet_mirroring_resource (google.cloud.compute_v1.types.PacketMirroring): + The body resource for this request + This corresponds to the ``packet_mirroring_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, packet_mirroring, packet_mirroring_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchPacketMirroringRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchPacketMirroringRequest): + request = compute.PatchPacketMirroringRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if packet_mirroring is not None: + request.packet_mirroring = packet_mirroring + if packet_mirroring_resource is not None: + request.packet_mirroring_resource = packet_mirroring_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsPacketMirroringRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsPacketMirroringRequest, dict]): + The request object. A request message for + PacketMirrorings.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsPacketMirroringRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsPacketMirroringRequest): + request = compute.TestIamPermissionsPacketMirroringRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PacketMirroringsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/pagers.py new file mode 100644 index 000000000..5f4244280 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.PacketMirroringAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.PacketMirroringAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.PacketMirroringAggregatedList], + request: compute.AggregatedListPacketMirroringsRequest, + response: compute.PacketMirroringAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListPacketMirroringsRequest): + The initial request object. + response (google.cloud.compute_v1.types.PacketMirroringAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.AggregatedListPacketMirroringsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.PacketMirroringAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.PacketMirroringsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.PacketMirroringsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.PacketMirroringList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.PacketMirroringList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.PacketMirroringList], + request: compute.ListPacketMirroringsRequest, + response: compute.PacketMirroringList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. 
+ request (google.cloud.compute_v1.types.ListPacketMirroringsRequest): + The initial request object. + response (google.cloud.compute_v1.types.PacketMirroringList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListPacketMirroringsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.PacketMirroringList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.PacketMirroring]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/__init__.py new file mode 100644 index 000000000..70ed5ff32 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PacketMirroringsTransport +from .rest import PacketMirroringsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[PacketMirroringsTransport]] +_transport_registry['rest'] = PacketMirroringsRestTransport + +__all__ = ( + 'PacketMirroringsTransport', + 'PacketMirroringsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/base.py new file mode 100644 index 000000000..aece70fc4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/base.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class PacketMirroringsTransport(abc.ABC): + """Abstract transport class for PacketMirrorings.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListPacketMirroringsRequest], + Union[ + compute.PacketMirroringAggregatedList, + Awaitable[compute.PacketMirroringAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeletePacketMirroringRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetPacketMirroringRequest], + Union[ + compute.PacketMirroring, + Awaitable[compute.PacketMirroring] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertPacketMirroringRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListPacketMirroringsRequest], + Union[ + compute.PacketMirroringList, + Awaitable[compute.PacketMirroringList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchPacketMirroringRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsPacketMirroringRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'PacketMirroringsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/rest.py new file mode 100644 index 000000000..72d76e68b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/packet_mirrorings/transports/rest.py @@ -0,0 +1,880 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import 
grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import PacketMirroringsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class PacketMirroringsRestTransport(PacketMirroringsTransport): + """REST backend transport for PacketMirrorings. + + The PacketMirrorings API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListPacketMirroringsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PacketMirroringAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListPacketMirroringsRequest): + The request object. A request message for + PacketMirrorings.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PacketMirroringAggregatedList: + Contains a list of packetMirrorings. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/packetMirrorings', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListPacketMirroringsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListPacketMirroringsRequest.to_json( + compute.AggregatedListPacketMirroringsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.PacketMirroringAggregatedList.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _delete(self,
+            request: compute.DeletePacketMirroringRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the delete method over HTTP.
+
+        Args:
+            request (~.compute.DeletePacketMirroringRequest):
+                The request object. A request message for
+                PacketMirrorings.Delete. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{packet_mirroring}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "packet_mirroring", + "packetMirroring" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeletePacketMirroringRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeletePacketMirroringRequest.to_json( + compute.DeletePacketMirroringRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetPacketMirroringRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PacketMirroring: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetPacketMirroringRequest): + The request object. A request message for + PacketMirrorings.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PacketMirroring: + Represents a Packet Mirroring + resource. Packet Mirroring clones the + traffic of specified instances in your + Virtual Private Cloud (VPC) network and + forwards it to a collector destination, + such as an instance group of an internal + TCP/UDP load balancer, for analysis or + examination. For more information about + setting up Packet Mirroring, see Using + Packet Mirroring. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{packet_mirroring}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "packet_mirroring", + "packetMirroring" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetPacketMirroringRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetPacketMirroringRequest.to_json( + compute.GetPacketMirroringRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.PacketMirroring.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _insert(self,
+            request: compute.InsertPacketMirroringRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the insert method over HTTP.
+
+        Args:
+            request (~.compute.InsertPacketMirroringRequest):
+                The request object. A request message for
+                PacketMirrorings.Insert. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/packetMirrorings', + 'body': 'packet_mirroring_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertPacketMirroringRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.PacketMirroring.to_json( + compute.PacketMirroring( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertPacketMirroringRequest.to_json( + compute.InsertPacketMirroringRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListPacketMirroringsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PacketMirroringList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListPacketMirroringsRequest): + The request object. A request message for + PacketMirrorings.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PacketMirroringList: + Contains a list of PacketMirroring + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/packetMirrorings', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListPacketMirroringsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListPacketMirroringsRequest.to_json( + compute.ListPacketMirroringsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+        orig_query_params = transcoded_request["query_params"]
+        for snake_case_name, camel_case_name in required_fields:
+            if snake_case_name in orig_query_params:
+                if camel_case_name not in query_params:
+                    query_params[camel_case_name] = orig_query_params[snake_case_name]
+
+        # Send the request
+        headers = dict(metadata)
+        headers['Content-Type'] = 'application/json'
+        response=getattr(self._session, method)(
+            # Replace with proper schema configuration (http/https) logic
+            "https://{host}{uri}".format(host=self._host, uri=uri),
+            timeout=timeout,
+            headers=headers,
+            params=rest_helpers.flatten_query_params(query_params),
+        )
+
+        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+        # subclass.
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.PacketMirroringList.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _patch(self,
+            request: compute.PatchPacketMirroringRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the patch method over HTTP.
+
+        Args:
+            request (~.compute.PatchPacketMirroringRequest):
+                The request object. A request message for
+                PacketMirrorings.Patch. See the method
+                description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{packet_mirroring}', + 'body': 'packet_mirroring_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "packet_mirroring", + "packetMirroring" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchPacketMirroringRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.PacketMirroring.to_json( + compute.PacketMirroring( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchPacketMirroringRequest.to_json( + compute.PatchPacketMirroringRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsPacketMirroringRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsPacketMirroringRequest): + The request object. A request message for + PacketMirrorings.TestIamPermissions. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsPacketMirroringRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsPacketMirroringRequest.to_json( + compute.TestIamPermissionsPacketMirroringRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListPacketMirroringsRequest], + compute.PacketMirroringAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeletePacketMirroringRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetPacketMirroringRequest], + compute.PacketMirroring]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertPacketMirroringRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListPacketMirroringsRequest], + compute.PacketMirroringList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchPacketMirroringRequest], + compute.Operation]: + return self._patch + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsPacketMirroringRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def 
close(self): + self._session.close() + + +__all__=( + 'PacketMirroringsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/__init__.py new file mode 100644 index 000000000..1ef64dec0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ProjectsClient + +__all__ = ( + 'ProjectsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/client.py new file mode 100644 index 000000000..00083173a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/client.py @@ -0,0 +1,1434 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.projects import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import ProjectsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ProjectsRestTransport + + +class ProjectsClientMeta(type): + """Metaclass for the Projects client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ProjectsTransport]] + _transport_registry["rest"] = ProjectsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ProjectsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ProjectsClient(metaclass=ProjectsClientMeta):
+    """The Projects API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ProjectsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ProjectsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ProjectsTransport: + """Returns the transport used by the client instance. + + Returns: + ProjectsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + 
return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ProjectsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the projects client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ProjectsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ProjectsTransport): + # transport is a ProjectsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def disable_xpn_host(self, + request: Union[compute.DisableXpnHostProjectRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Disable this project as a shared VPC host project. + + Args: + request (Union[google.cloud.compute_v1.types.DisableXpnHostProjectRequest, dict]): + The request object. A request message for + Projects.DisableXpnHost. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DisableXpnHostProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DisableXpnHostProjectRequest): + request = compute.DisableXpnHostProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.disable_xpn_host] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def disable_xpn_resource(self, + request: Union[compute.DisableXpnResourceProjectRequest, dict] = None, + *, + project: str = None, + projects_disable_xpn_resource_request_resource: compute.ProjectsDisableXpnResourceRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Disable a service resource (also known as service + project) associated with this host project. 
+ + Args: + request (Union[google.cloud.compute_v1.types.DisableXpnResourceProjectRequest, dict]): + The request object. A request message for + Projects.DisableXpnResource. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + projects_disable_xpn_resource_request_resource (google.cloud.compute_v1.types.ProjectsDisableXpnResourceRequest): + The body resource for this request + This corresponds to the ``projects_disable_xpn_resource_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, projects_disable_xpn_resource_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DisableXpnResourceProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DisableXpnResourceProjectRequest): + request = compute.DisableXpnResourceProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if projects_disable_xpn_resource_request_resource is not None: + request.projects_disable_xpn_resource_request_resource = projects_disable_xpn_resource_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.disable_xpn_resource] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def enable_xpn_host(self, + request: Union[compute.EnableXpnHostProjectRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Enable this project as a shared VPC host project. + + Args: + request (Union[google.cloud.compute_v1.types.EnableXpnHostProjectRequest, dict]): + The request object. A request message for + Projects.EnableXpnHost. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.EnableXpnHostProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.EnableXpnHostProjectRequest): + request = compute.EnableXpnHostProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.enable_xpn_host] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def enable_xpn_resource(self, + request: Union[compute.EnableXpnResourceProjectRequest, dict] = None, + *, + project: str = None, + projects_enable_xpn_resource_request_resource: compute.ProjectsEnableXpnResourceRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Enable service resource (a.k.a service project) for a + host project, so that subnets in the host project can be + used by instances in the service project. + + Args: + request (Union[google.cloud.compute_v1.types.EnableXpnResourceProjectRequest, dict]): + The request object. A request message for + Projects.EnableXpnResource. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + projects_enable_xpn_resource_request_resource (google.cloud.compute_v1.types.ProjectsEnableXpnResourceRequest): + The body resource for this request + This corresponds to the ``projects_enable_xpn_resource_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, projects_enable_xpn_resource_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.EnableXpnResourceProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.EnableXpnResourceProjectRequest): + request = compute.EnableXpnResourceProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if projects_enable_xpn_resource_request_resource is not None: + request.projects_enable_xpn_resource_request_resource = projects_enable_xpn_resource_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.enable_xpn_resource] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetProjectRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Project: + r"""Returns the specified Project resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetProjectRequest, dict]): + The request object. A request message for Projects.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Project: + Represents a Project resource. A + project is used to organize resources in + a Google Cloud Platform environment. For + more information, read about the + Resource Hierarchy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetProjectRequest): + request = compute.GetProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_xpn_host(self, + request: Union[compute.GetXpnHostProjectRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Project: + r"""Gets the shared VPC host project that this project + links to. May be empty if no link exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetXpnHostProjectRequest, dict]): + The request object. A request message for + Projects.GetXpnHost. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Project: + Represents a Project resource. A + project is used to organize resources in + a Google Cloud Platform environment. For + more information, read about the + Resource Hierarchy. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetXpnHostProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetXpnHostProjectRequest): + request = compute.GetXpnHostProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_xpn_host] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_xpn_resources(self, + request: Union[compute.GetXpnResourcesProjectsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.GetXpnResourcesPager: + r"""Gets service resources (a.k.a service project) + associated with this host project. + + Args: + request (Union[google.cloud.compute_v1.types.GetXpnResourcesProjectsRequest, dict]): + The request object. A request message for + Projects.GetXpnResources. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.projects.pagers.GetXpnResourcesPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetXpnResourcesProjectsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetXpnResourcesProjectsRequest): + request = compute.GetXpnResourcesProjectsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_xpn_resources] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.GetXpnResourcesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_xpn_hosts(self, + request: Union[compute.ListXpnHostsProjectsRequest, dict] = None, + *, + project: str = None, + projects_list_xpn_hosts_request_resource: compute.ProjectsListXpnHostsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListXpnHostsPager: + r"""Lists all shared VPC host projects visible to the + user in an organization. + + Args: + request (Union[google.cloud.compute_v1.types.ListXpnHostsProjectsRequest, dict]): + The request object. A request message for + Projects.ListXpnHosts. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + projects_list_xpn_hosts_request_resource (google.cloud.compute_v1.types.ProjectsListXpnHostsRequest): + The body resource for this request + This corresponds to the ``projects_list_xpn_hosts_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.projects.pagers.ListXpnHostsPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, projects_list_xpn_hosts_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListXpnHostsProjectsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListXpnHostsProjectsRequest): + request = compute.ListXpnHostsProjectsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if projects_list_xpn_hosts_request_resource is not None: + request.projects_list_xpn_hosts_request_resource = projects_list_xpn_hosts_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_xpn_hosts] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListXpnHostsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def move_disk(self, + request: Union[compute.MoveDiskProjectRequest, dict] = None, + *, + project: str = None, + disk_move_request_resource: compute.DiskMoveRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Moves a persistent disk from one zone to another. + + Args: + request (Union[google.cloud.compute_v1.types.MoveDiskProjectRequest, dict]): + The request object. A request message for + Projects.MoveDisk. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk_move_request_resource (google.cloud.compute_v1.types.DiskMoveRequest): + The body resource for this request + This corresponds to the ``disk_move_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, disk_move_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.MoveDiskProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.MoveDiskProjectRequest): + request = compute.MoveDiskProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if disk_move_request_resource is not None: + request.disk_move_request_resource = disk_move_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.move_disk] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def move_instance(self, + request: Union[compute.MoveInstanceProjectRequest, dict] = None, + *, + project: str = None, + instance_move_request_resource: compute.InstanceMoveRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Moves an instance and its attached persistent disks + from one zone to another. + + Args: + request (Union[google.cloud.compute_v1.types.MoveInstanceProjectRequest, dict]): + The request object. A request message for + Projects.MoveInstance. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instance_move_request_resource (google.cloud.compute_v1.types.InstanceMoveRequest): + The body resource for this request + This corresponds to the ``instance_move_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, instance_move_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.MoveInstanceProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.MoveInstanceProjectRequest): + request = compute.MoveInstanceProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if instance_move_request_resource is not None: + request.instance_move_request_resource = instance_move_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.move_instance] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_common_instance_metadata(self, + request: Union[compute.SetCommonInstanceMetadataProjectRequest, dict] = None, + *, + project: str = None, + metadata_resource: compute.Metadata = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets metadata common to all instances within the + specified project using the data included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.SetCommonInstanceMetadataProjectRequest, dict]): + The request object. A request message for + Projects.SetCommonInstanceMetadata. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + metadata_resource (google.cloud.compute_v1.types.Metadata): + The body resource for this request + This corresponds to the ``metadata_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, metadata_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetCommonInstanceMetadataProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetCommonInstanceMetadataProjectRequest): + request = compute.SetCommonInstanceMetadataProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if metadata_resource is not None: + request.metadata_resource = metadata_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.set_common_instance_metadata] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_default_network_tier(self, + request: Union[compute.SetDefaultNetworkTierProjectRequest, dict] = None, + *, + project: str = None, + projects_set_default_network_tier_request_resource: compute.ProjectsSetDefaultNetworkTierRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the default network tier of the project. The + default network tier is used when an + address/forwardingRule/instance is created without + specifying the network tier field. + + Args: + request (Union[google.cloud.compute_v1.types.SetDefaultNetworkTierProjectRequest, dict]): + The request object. A request message for + Projects.SetDefaultNetworkTier. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + projects_set_default_network_tier_request_resource (google.cloud.compute_v1.types.ProjectsSetDefaultNetworkTierRequest): + The body resource for this request + This corresponds to the ``projects_set_default_network_tier_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, projects_set_default_network_tier_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetDefaultNetworkTierProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetDefaultNetworkTierProjectRequest): + request = compute.SetDefaultNetworkTierProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if projects_set_default_network_tier_request_resource is not None: + request.projects_set_default_network_tier_request_resource = projects_set_default_network_tier_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.set_default_network_tier] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_usage_export_bucket(self, + request: Union[compute.SetUsageExportBucketProjectRequest, dict] = None, + *, + project: str = None, + usage_export_location_resource: compute.UsageExportLocation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Enables the usage export feature and sets the usage + export bucket where reports are stored. If you provide + an empty request body using this method, the usage + export feature will be disabled. + + Args: + request (Union[google.cloud.compute_v1.types.SetUsageExportBucketProjectRequest, dict]): + The request object. A request message for + Projects.SetUsageExportBucket. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + usage_export_location_resource (google.cloud.compute_v1.types.UsageExportLocation): + The body resource for this request + This corresponds to the ``usage_export_location_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, usage_export_location_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetUsageExportBucketProjectRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetUsageExportBucketProjectRequest): + request = compute.SetUsageExportBucketProjectRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if usage_export_location_resource is not None: + request.usage_export_location_resource = usage_export_location_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_usage_export_bucket] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ProjectsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/pagers.py new file mode 100644 index 000000000..586c86bc8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/pagers.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class GetXpnResourcesPager: + """A pager for iterating through ``get_xpn_resources`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ProjectsGetXpnResources` object, and + provides an ``__iter__`` method to iterate through its + ``resources`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``GetXpnResources`` requests and continue to iterate + through the ``resources`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ProjectsGetXpnResources` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ProjectsGetXpnResources], + request: compute.GetXpnResourcesProjectsRequest, + response: compute.ProjectsGetXpnResources, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.GetXpnResourcesProjectsRequest): + The initial request object. + response (google.cloud.compute_v1.types.ProjectsGetXpnResources): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.GetXpnResourcesProjectsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ProjectsGetXpnResources]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.XpnResourceId]: + for page in self.pages: + yield from page.resources + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListXpnHostsPager: + """A pager for iterating through ``list_xpn_hosts`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.XpnHostList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListXpnHosts`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.XpnHostList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.XpnHostList], + request: compute.ListXpnHostsProjectsRequest, + response: compute.XpnHostList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListXpnHostsProjectsRequest): + The initial request object. + response (google.cloud.compute_v1.types.XpnHostList): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListXpnHostsProjectsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.XpnHostList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Project]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/__init__.py new file mode 100644 index 000000000..e3b87e2a4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ProjectsTransport +from .rest import ProjectsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[ProjectsTransport]] +_transport_registry['rest'] = ProjectsRestTransport + +__all__ = ( + 'ProjectsTransport', + 'ProjectsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/base.py new file mode 100644 index 000000000..dd8292c86 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/base.py @@ -0,0 +1,315 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ProjectsTransport(abc.ABC): + """Abstract transport class for Projects.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.disable_xpn_host: gapic_v1.method.wrap_method( + self.disable_xpn_host, + default_timeout=None, + client_info=client_info, + ), + self.disable_xpn_resource: gapic_v1.method.wrap_method( + self.disable_xpn_resource, + default_timeout=None, + client_info=client_info, + ), + self.enable_xpn_host: gapic_v1.method.wrap_method( + self.enable_xpn_host, + default_timeout=None, + client_info=client_info, + ), + self.enable_xpn_resource: gapic_v1.method.wrap_method( + self.enable_xpn_resource, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_xpn_host: gapic_v1.method.wrap_method( + self.get_xpn_host, + default_timeout=None, + client_info=client_info, + ), + self.get_xpn_resources: gapic_v1.method.wrap_method( + self.get_xpn_resources, + default_timeout=None, + client_info=client_info, + ), + self.list_xpn_hosts: gapic_v1.method.wrap_method( + self.list_xpn_hosts, + default_timeout=None, + client_info=client_info, + ), + self.move_disk: gapic_v1.method.wrap_method( + self.move_disk, + default_timeout=None, + client_info=client_info, + ), + self.move_instance: gapic_v1.method.wrap_method( + self.move_instance, + default_timeout=None, + client_info=client_info, + ), + self.set_common_instance_metadata: gapic_v1.method.wrap_method( + self.set_common_instance_metadata, + default_timeout=None, + client_info=client_info, + ), + self.set_default_network_tier: gapic_v1.method.wrap_method( + self.set_default_network_tier, + default_timeout=None, + client_info=client_info, + ), + self.set_usage_export_bucket: gapic_v1.method.wrap_method( + self.set_usage_export_bucket, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. 
warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def disable_xpn_host(self) -> Callable[ + [compute.DisableXpnHostProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def disable_xpn_resource(self) -> Callable[ + [compute.DisableXpnResourceProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def enable_xpn_host(self) -> Callable[ + [compute.EnableXpnHostProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def enable_xpn_resource(self) -> Callable[ + [compute.EnableXpnResourceProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetProjectRequest], + Union[ + compute.Project, + Awaitable[compute.Project] + ]]: + raise NotImplementedError() + + @property + def get_xpn_host(self) -> Callable[ + [compute.GetXpnHostProjectRequest], + Union[ + compute.Project, + Awaitable[compute.Project] + ]]: + raise NotImplementedError() + + @property + def get_xpn_resources(self) -> Callable[ + [compute.GetXpnResourcesProjectsRequest], + Union[ + compute.ProjectsGetXpnResources, + Awaitable[compute.ProjectsGetXpnResources] + ]]: + raise NotImplementedError() + + @property + def list_xpn_hosts(self) -> Callable[ + [compute.ListXpnHostsProjectsRequest], + Union[ + compute.XpnHostList, + Awaitable[compute.XpnHostList] + ]]: + raise NotImplementedError() + + @property + def move_disk(self) -> Callable[ + [compute.MoveDiskProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def move_instance(self) -> Callable[ + 
[compute.MoveInstanceProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_common_instance_metadata(self) -> Callable[ + [compute.SetCommonInstanceMetadataProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_default_network_tier(self) -> Callable[ + [compute.SetDefaultNetworkTierProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_usage_export_bucket(self) -> Callable[ + [compute.SetUsageExportBucketProjectRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ProjectsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/rest.py new file mode 100644 index 000000000..ddb4258f9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/projects/transports/rest.py @@ -0,0 +1,1513 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: 
utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import ProjectsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ProjectsRestTransport(ProjectsTransport): + """REST backend transport for Projects. + + The Projects API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _disable_xpn_host(self, + request: compute.DisableXpnHostProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the disable xpn host method over HTTP. + + Args: + request (~.compute.DisableXpnHostProjectRequest): + The request object. A request message for + Projects.DisableXpnHost. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/disableXpnHost', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DisableXpnHostProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DisableXpnHostProjectRequest.to_json( + compute.DisableXpnHostProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _disable_xpn_resource(self, + request: compute.DisableXpnResourceProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the disable xpn resource method over HTTP. + + Args: + request (~.compute.DisableXpnResourceProjectRequest): + The request object. A request message for + Projects.DisableXpnResource. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/disableXpnResource', + 'body': 'projects_disable_xpn_resource_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.DisableXpnResourceProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ProjectsDisableXpnResourceRequest.to_json( + compute.ProjectsDisableXpnResourceRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DisableXpnResourceProjectRequest.to_json( + compute.DisableXpnResourceProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _enable_xpn_host(self, + request: compute.EnableXpnHostProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the enable xpn host method over HTTP. + + Args: + request (~.compute.EnableXpnHostProjectRequest): + The request object. A request message for + Projects.EnableXpnHost. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/enableXpnHost', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.EnableXpnHostProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.EnableXpnHostProjectRequest.to_json( + compute.EnableXpnHostProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _enable_xpn_resource(self, + request: compute.EnableXpnResourceProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the enable xpn resource method over HTTP. + + Args: + request (~.compute.EnableXpnResourceProjectRequest): + The request object. A request message for + Projects.EnableXpnResource. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/enableXpnResource', + 'body': 'projects_enable_xpn_resource_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.EnableXpnResourceProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ProjectsEnableXpnResourceRequest.to_json( + compute.ProjectsEnableXpnResourceRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.EnableXpnResourceProjectRequest.to_json( + compute.EnableXpnResourceProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Project: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetProjectRequest): + The request object. A request message for Projects.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Project: + Represents a Project resource. A + project is used to organize resources in + a Google Cloud Platform environment. For + more information, read about the + Resource Hierarchy. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetProjectRequest.to_json( + compute.GetProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Project.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_xpn_host(self, + request: compute.GetXpnHostProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Project: + r"""Call the get xpn host method over HTTP. + + Args: + request (~.compute.GetXpnHostProjectRequest): + The request object. A request message for + Projects.GetXpnHost. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Project: + Represents a Project resource. A + project is used to organize resources in + a Google Cloud Platform environment. For + more information, read about the + Resource Hierarchy. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/getXpnHost', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetXpnHostProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetXpnHostProjectRequest.to_json( + compute.GetXpnHostProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Project.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_xpn_resources(self, + request: compute.GetXpnResourcesProjectsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ProjectsGetXpnResources: + r"""Call the get xpn resources method over HTTP. + + Args: + request (~.compute.GetXpnResourcesProjectsRequest): + The request object. A request message for + Projects.GetXpnResources. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ProjectsGetXpnResources: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/getXpnResources', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.GetXpnResourcesProjectsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetXpnResourcesProjectsRequest.to_json( + compute.GetXpnResourcesProjectsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ProjectsGetXpnResources.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_xpn_hosts(self, + request: compute.ListXpnHostsProjectsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.XpnHostList: + r"""Call the list xpn hosts method over HTTP. + + Args: + request (~.compute.ListXpnHostsProjectsRequest): + The request object. A request message for + Projects.ListXpnHosts. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.XpnHostList: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/listXpnHosts', + 'body': 'projects_list_xpn_hosts_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListXpnHostsProjectsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ProjectsListXpnHostsRequest.to_json( + compute.ProjectsListXpnHostsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListXpnHostsProjectsRequest.to_json( + compute.ListXpnHostsProjectsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.XpnHostList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _move_disk(self, + request: compute.MoveDiskProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the move disk method over HTTP. + + Args: + request (~.compute.MoveDiskProjectRequest): + The request object. A request message for + Projects.MoveDisk. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/moveDisk', + 'body': 'disk_move_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.MoveDiskProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.DiskMoveRequest.to_json( + compute.DiskMoveRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.MoveDiskProjectRequest.to_json( + compute.MoveDiskProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _move_instance(self, + request: compute.MoveInstanceProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the move instance method over HTTP. + + Args: + request (~.compute.MoveInstanceProjectRequest): + The request object. A request message for + Projects.MoveInstance. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/moveInstance', + 'body': 'instance_move_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.MoveInstanceProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceMoveRequest.to_json( + compute.InstanceMoveRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.MoveInstanceProjectRequest.to_json( + compute.MoveInstanceProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_common_instance_metadata(self, + request: compute.SetCommonInstanceMetadataProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set common instance + metadata method over HTTP. + + Args: + request (~.compute.SetCommonInstanceMetadataProjectRequest): + The request object. A request message for + Projects.SetCommonInstanceMetadata. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/setCommonInstanceMetadata', + 'body': 'metadata_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.SetCommonInstanceMetadataProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Metadata.to_json( + compute.Metadata( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetCommonInstanceMetadataProjectRequest.to_json( + compute.SetCommonInstanceMetadataProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_default_network_tier(self, + request: compute.SetDefaultNetworkTierProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set default network tier method over HTTP. + + Args: + request (~.compute.SetDefaultNetworkTierProjectRequest): + The request object. A request message for + Projects.SetDefaultNetworkTier. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/setDefaultNetworkTier', + 'body': 'projects_set_default_network_tier_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.SetDefaultNetworkTierProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ProjectsSetDefaultNetworkTierRequest.to_json( + compute.ProjectsSetDefaultNetworkTierRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetDefaultNetworkTierProjectRequest.to_json( + compute.SetDefaultNetworkTierProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_usage_export_bucket(self, + request: compute.SetUsageExportBucketProjectRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set usage export bucket method over HTTP. + + Args: + request (~.compute.SetUsageExportBucketProjectRequest): + The request object. A request message for + Projects.SetUsageExportBucket. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/setUsageExportBucket', + 'body': 'usage_export_location_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.SetUsageExportBucketProjectRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UsageExportLocation.to_json( + compute.UsageExportLocation( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetUsageExportBucketProjectRequest.to_json( + compute.SetUsageExportBucketProjectRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def disable_xpn_host(self) -> Callable[ + [compute.DisableXpnHostProjectRequest], + compute.Operation]: + return self._disable_xpn_host + @ property + def disable_xpn_resource(self) -> Callable[ + [compute.DisableXpnResourceProjectRequest], + compute.Operation]: + return self._disable_xpn_resource + @ property + def enable_xpn_host(self) -> Callable[ + [compute.EnableXpnHostProjectRequest], + compute.Operation]: + return self._enable_xpn_host + @ property + def enable_xpn_resource(self) -> Callable[ + [compute.EnableXpnResourceProjectRequest], + compute.Operation]: + return self._enable_xpn_resource + @ property + def get(self) -> Callable[ + [compute.GetProjectRequest], + compute.Project]: + return self._get + @ property + def get_xpn_host(self) -> Callable[ + [compute.GetXpnHostProjectRequest], + compute.Project]: + return self._get_xpn_host + @ property + def get_xpn_resources(self) -> Callable[ + [compute.GetXpnResourcesProjectsRequest], + compute.ProjectsGetXpnResources]: + return self._get_xpn_resources + @ property + def list_xpn_hosts(self) -> Callable[ + [compute.ListXpnHostsProjectsRequest], + compute.XpnHostList]: + return self._list_xpn_hosts + @ property + def move_disk(self) -> Callable[ + [compute.MoveDiskProjectRequest], + compute.Operation]: + return self._move_disk + @ property + def move_instance(self) -> Callable[ + [compute.MoveInstanceProjectRequest], + compute.Operation]: + return self._move_instance + @ property + def set_common_instance_metadata(self) -> Callable[ + [compute.SetCommonInstanceMetadataProjectRequest], + compute.Operation]: + return self._set_common_instance_metadata + @ property + def set_default_network_tier(self) -> Callable[ + [compute.SetDefaultNetworkTierProjectRequest], + compute.Operation]: + return 
self._set_default_network_tier + @ property + def set_usage_export_bucket(self) -> Callable[ + [compute.SetUsageExportBucketProjectRequest], + compute.Operation]: + return self._set_usage_export_bucket + def close(self): + self._session.close() + + +__all__=( + 'ProjectsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/__init__.py new file mode 100644 index 000000000..1dd78622c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PublicAdvertisedPrefixesClient + +__all__ = ( + 'PublicAdvertisedPrefixesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/client.py new file mode 100644 index 000000000..b00c3e632 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/client.py @@ -0,0 +1,790 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.public_advertised_prefixes import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import PublicAdvertisedPrefixesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import PublicAdvertisedPrefixesRestTransport + + +class PublicAdvertisedPrefixesClientMeta(type): + """Metaclass for the PublicAdvertisedPrefixes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PublicAdvertisedPrefixesTransport]] + _transport_registry["rest"] = PublicAdvertisedPrefixesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PublicAdvertisedPrefixesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PublicAdvertisedPrefixesClient(metaclass=PublicAdvertisedPrefixesClientMeta): + """The PublicAdvertisedPrefixes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PublicAdvertisedPrefixesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PublicAdvertisedPrefixesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PublicAdvertisedPrefixesTransport: + """Returns the transport used by the client instance. + + Returns: + PublicAdvertisedPrefixesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PublicAdvertisedPrefixesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the public advertised prefixes client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PublicAdvertisedPrefixesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PublicAdvertisedPrefixesTransport): + # transport is a PublicAdvertisedPrefixesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeletePublicAdvertisedPrefixeRequest, dict] = None, + *, + project: str = None, + public_advertised_prefix: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified PublicAdvertisedPrefix + + Args: + request (Union[google.cloud.compute_v1.types.DeletePublicAdvertisedPrefixeRequest, dict]): + The request object. A request message for + PublicAdvertisedPrefixes.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_advertised_prefix (str): + Name of the PublicAdvertisedPrefix + resource to delete. 
+ + This corresponds to the ``public_advertised_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, public_advertised_prefix]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeletePublicAdvertisedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeletePublicAdvertisedPrefixeRequest): + request = compute.DeletePublicAdvertisedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if public_advertised_prefix is not None: + request.public_advertised_prefix = public_advertised_prefix + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetPublicAdvertisedPrefixeRequest, dict] = None, + *, + project: str = None, + public_advertised_prefix: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.PublicAdvertisedPrefix: + r"""Returns the specified PublicAdvertisedPrefix + resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetPublicAdvertisedPrefixeRequest, dict]): + The request object. A request message for + PublicAdvertisedPrefixes.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_advertised_prefix (str): + Name of the PublicAdvertisedPrefix + resource to return. + + This corresponds to the ``public_advertised_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.PublicAdvertisedPrefix: + A public advertised prefix represents + an aggregated IP prefix or netblock + which customers bring to cloud. The IP + prefix is a single unit of route + advertisement and is announced globally + to the internet. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, public_advertised_prefix]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetPublicAdvertisedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetPublicAdvertisedPrefixeRequest): + request = compute.GetPublicAdvertisedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if public_advertised_prefix is not None: + request.public_advertised_prefix = public_advertised_prefix + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertPublicAdvertisedPrefixeRequest, dict] = None, + *, + project: str = None, + public_advertised_prefix_resource: compute.PublicAdvertisedPrefix = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a PublicAdvertisedPrefix in the specified + project using the parameters that are included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertPublicAdvertisedPrefixeRequest, dict]): + The request object. A request message for + PublicAdvertisedPrefixes.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_advertised_prefix_resource (google.cloud.compute_v1.types.PublicAdvertisedPrefix): + The body resource for this request + This corresponds to the ``public_advertised_prefix_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, public_advertised_prefix_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertPublicAdvertisedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertPublicAdvertisedPrefixeRequest): + request = compute.InsertPublicAdvertisedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if public_advertised_prefix_resource is not None: + request.public_advertised_prefix_resource = public_advertised_prefix_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListPublicAdvertisedPrefixesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Lists the PublicAdvertisedPrefixes for a project. 
+ + Args: + request (Union[google.cloud.compute_v1.types.ListPublicAdvertisedPrefixesRequest, dict]): + The request object. A request message for + PublicAdvertisedPrefixes.List. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.public_advertised_prefixes.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListPublicAdvertisedPrefixesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListPublicAdvertisedPrefixesRequest): + request = compute.ListPublicAdvertisedPrefixesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. 
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def patch(self,
+            request: Union[compute.PatchPublicAdvertisedPrefixeRequest, dict] = None,
+            *,
+            project: str = None,
+            public_advertised_prefix: str = None,
+            public_advertised_prefix_resource: compute.PublicAdvertisedPrefix = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: float = None,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> compute.Operation:
+        r"""Patches the specified PublicAdvertisedPrefix resource
+        with the data included in the request. This method supports
+        PATCH semantics and uses JSON merge patch format and
+        processing rules.
+
+        Args:
+            request (Union[google.cloud.compute_v1.types.PatchPublicAdvertisedPrefixeRequest, dict]):
+                The request object. A request message for
+                PublicAdvertisedPrefixes.Patch. See the method
+                description for details.
+            project (str):
+                Project ID for this request.
+                This corresponds to the ``project`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            public_advertised_prefix (str):
+                Name of the PublicAdvertisedPrefix
+                resource to patch.
+
+                This corresponds to the ``public_advertised_prefix`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            public_advertised_prefix_resource (google.cloud.compute_v1.types.PublicAdvertisedPrefix):
+                The body resource for this request
+                This corresponds to the ``public_advertised_prefix_resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, public_advertised_prefix, public_advertised_prefix_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchPublicAdvertisedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchPublicAdvertisedPrefixeRequest): + request = compute.PatchPublicAdvertisedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if public_advertised_prefix is not None: + request.public_advertised_prefix = public_advertised_prefix + if public_advertised_prefix_resource is not None: + request.public_advertised_prefix_resource = public_advertised_prefix_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PublicAdvertisedPrefixesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/pagers.py new file mode 100644 index 000000000..71a8ede68 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.PublicAdvertisedPrefixList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.PublicAdvertisedPrefixList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.PublicAdvertisedPrefixList], + request: compute.ListPublicAdvertisedPrefixesRequest, + response: compute.PublicAdvertisedPrefixList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListPublicAdvertisedPrefixesRequest): + The initial request object. + response (google.cloud.compute_v1.types.PublicAdvertisedPrefixList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListPublicAdvertisedPrefixesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.PublicAdvertisedPrefixList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.PublicAdvertisedPrefix]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/__init__.py new file mode 100644 index 000000000..7bb8196d7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PublicAdvertisedPrefixesTransport +from .rest import PublicAdvertisedPrefixesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[PublicAdvertisedPrefixesTransport]] +_transport_registry['rest'] = PublicAdvertisedPrefixesRestTransport + +__all__ = ( + 'PublicAdvertisedPrefixesTransport', + 'PublicAdvertisedPrefixesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/base.py new file mode 100644 index 000000000..6df90d0ee --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class PublicAdvertisedPrefixesTransport(abc.ABC): + """Abstract transport class for PublicAdvertisedPrefixes.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeletePublicAdvertisedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetPublicAdvertisedPrefixeRequest], + Union[ + compute.PublicAdvertisedPrefix, + Awaitable[compute.PublicAdvertisedPrefix] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertPublicAdvertisedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListPublicAdvertisedPrefixesRequest], + Union[ + compute.PublicAdvertisedPrefixList, + Awaitable[compute.PublicAdvertisedPrefixList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchPublicAdvertisedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'PublicAdvertisedPrefixesTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/rest.py new file mode 100644 index 000000000..2bb169a3a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_advertised_prefixes/transports/rest.py @@ -0,0 +1,657 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import PublicAdvertisedPrefixesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class PublicAdvertisedPrefixesRestTransport(PublicAdvertisedPrefixesTransport): + """REST backend transport for PublicAdvertisedPrefixes. + + The PublicAdvertisedPrefixes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. 
It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeletePublicAdvertisedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeletePublicAdvertisedPrefixeRequest): + The request object. A request message for + PublicAdvertisedPrefixes.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/publicAdvertisedPrefixes/{public_advertised_prefix}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "public_advertised_prefix", + "publicAdvertisedPrefix" + ), + ] + + request_kwargs = compute.DeletePublicAdvertisedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeletePublicAdvertisedPrefixeRequest.to_json( + compute.DeletePublicAdvertisedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetPublicAdvertisedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PublicAdvertisedPrefix: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetPublicAdvertisedPrefixeRequest): + The request object. A request message for + PublicAdvertisedPrefixes.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PublicAdvertisedPrefix: + A public advertised prefix represents + an aggregated IP prefix or netblock + which customers bring to cloud. The IP + prefix is a single unit of route + advertisement and is announced globally + to the internet. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/publicAdvertisedPrefixes/{public_advertised_prefix}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "public_advertised_prefix", + "publicAdvertisedPrefix" + ), + ] + + request_kwargs = compute.GetPublicAdvertisedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetPublicAdvertisedPrefixeRequest.to_json( + compute.GetPublicAdvertisedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.PublicAdvertisedPrefix.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertPublicAdvertisedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertPublicAdvertisedPrefixeRequest): + The request object. A request message for + PublicAdvertisedPrefixes.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/publicAdvertisedPrefixes', + 'body': 'public_advertised_prefix_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertPublicAdvertisedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.PublicAdvertisedPrefix.to_json( + compute.PublicAdvertisedPrefix( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertPublicAdvertisedPrefixeRequest.to_json( + compute.InsertPublicAdvertisedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListPublicAdvertisedPrefixesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PublicAdvertisedPrefixList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListPublicAdvertisedPrefixesRequest): + The request object. A request message for + PublicAdvertisedPrefixes.List. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PublicAdvertisedPrefixList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/publicAdvertisedPrefixes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListPublicAdvertisedPrefixesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListPublicAdvertisedPrefixesRequest.to_json( + compute.ListPublicAdvertisedPrefixesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.PublicAdvertisedPrefixList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchPublicAdvertisedPrefixeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchPublicAdvertisedPrefixeRequest): + The request object. A request message for + PublicAdvertisedPrefixes.Patch. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/publicAdvertisedPrefixes/{public_advertised_prefix}', + 'body': 'public_advertised_prefix_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "public_advertised_prefix", + "publicAdvertisedPrefix" + ), + ] + + request_kwargs = compute.PatchPublicAdvertisedPrefixeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.PublicAdvertisedPrefix.to_json( + compute.PublicAdvertisedPrefix( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchPublicAdvertisedPrefixeRequest.to_json( + compute.PatchPublicAdvertisedPrefixeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeletePublicAdvertisedPrefixeRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetPublicAdvertisedPrefixeRequest], + compute.PublicAdvertisedPrefix]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertPublicAdvertisedPrefixeRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListPublicAdvertisedPrefixesRequest], + compute.PublicAdvertisedPrefixList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchPublicAdvertisedPrefixeRequest], + compute.Operation]: + return self._patch + def close(self): + self._session.close() + + +__all__=( + 'PublicAdvertisedPrefixesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/__init__.py new file mode 100644 index 
000000000..ba7462aff --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import PublicDelegatedPrefixesClient + +__all__ = ( + 'PublicDelegatedPrefixesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/client.py new file mode 100644 index 000000000..7ccb30c63 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/client.py @@ -0,0 +1,914 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.public_delegated_prefixes import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import PublicDelegatedPrefixesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import PublicDelegatedPrefixesRestTransport + + +class PublicDelegatedPrefixesClientMeta(type): + """Metaclass for the PublicDelegatedPrefixes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[PublicDelegatedPrefixesTransport]] + _transport_registry["rest"] = PublicDelegatedPrefixesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[PublicDelegatedPrefixesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class PublicDelegatedPrefixesClient(metaclass=PublicDelegatedPrefixesClientMeta): + """The PublicDelegatedPrefixes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PublicDelegatedPrefixesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + PublicDelegatedPrefixesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> PublicDelegatedPrefixesTransport: + """Returns the transport used by the client instance. + + Returns: + PublicDelegatedPrefixesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, PublicDelegatedPrefixesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the public delegated prefixes client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, PublicDelegatedPrefixesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, PublicDelegatedPrefixesTransport): + # transport is a PublicDelegatedPrefixesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListPublicDelegatedPrefixesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Lists all PublicDelegatedPrefix resources owned by + the specific project across all scopes. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListPublicDelegatedPrefixesRequest, dict]): + The request object. A request message for + PublicDelegatedPrefixes.AggregatedList. See the method + description for details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.public_delegated_prefixes.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListPublicDelegatedPrefixesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListPublicDelegatedPrefixesRequest): + request = compute.AggregatedListPublicDelegatedPrefixesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeletePublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + region: str = None, + public_delegated_prefix: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified PublicDelegatedPrefix in the + given region. + + Args: + request (Union[google.cloud.compute_v1.types.DeletePublicDelegatedPrefixeRequest, dict]): + The request object. A request message for + PublicDelegatedPrefixes.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix + resource to delete. + + This corresponds to the ``public_delegated_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, public_delegated_prefix]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeletePublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeletePublicDelegatedPrefixeRequest): + request = compute.DeletePublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if public_delegated_prefix is not None: + request.public_delegated_prefix = public_delegated_prefix + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetPublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + region: str = None, + public_delegated_prefix: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.PublicDelegatedPrefix: + r"""Returns the specified PublicDelegatedPrefix resource + in the given region. + + Args: + request (Union[google.cloud.compute_v1.types.GetPublicDelegatedPrefixeRequest, dict]): + The request object. A request message for + PublicDelegatedPrefixes.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix + resource to return. + + This corresponds to the ``public_delegated_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.PublicDelegatedPrefix: + A PublicDelegatedPrefix resource + represents an IP block within a + PublicAdvertisedPrefix that is + configured within a single cloud scope + (global or region). IPs in the block can + be allocated to resources within that + scope. Public delegated prefixes may be + further broken up into smaller IP blocks + in the same scope as the parent block. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, public_delegated_prefix]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetPublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetPublicDelegatedPrefixeRequest): + request = compute.GetPublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if public_delegated_prefix is not None: + request.public_delegated_prefix = public_delegated_prefix + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertPublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + region: str = None, + public_delegated_prefix_resource: compute.PublicDelegatedPrefix = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a PublicDelegatedPrefix in the specified + project in the given region using the parameters that + are included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertPublicDelegatedPrefixeRequest, dict]): + The request object. 
A request message for + PublicDelegatedPrefixes.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + This corresponds to the ``public_delegated_prefix_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, public_delegated_prefix_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertPublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertPublicDelegatedPrefixeRequest): + request = compute.InsertPublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if public_delegated_prefix_resource is not None: + request.public_delegated_prefix_resource = public_delegated_prefix_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListPublicDelegatedPrefixesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Lists the PublicDelegatedPrefixes for a project in + the given region. + + Args: + request (Union[google.cloud.compute_v1.types.ListPublicDelegatedPrefixesRequest, dict]): + The request object. A request message for + PublicDelegatedPrefixes.List. See the method description + for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.public_delegated_prefixes.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListPublicDelegatedPrefixesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListPublicDelegatedPrefixesRequest): + request = compute.ListPublicDelegatedPrefixesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchPublicDelegatedPrefixeRequest, dict] = None, + *, + project: str = None, + region: str = None, + public_delegated_prefix: str = None, + public_delegated_prefix_resource: compute.PublicDelegatedPrefix = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified PublicDelegatedPrefix resource + with the data included in the request. This method + supports PATCH semantics and uses JSON merge patch + format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchPublicDelegatedPrefixeRequest, dict]): + The request object. A request message for + PublicDelegatedPrefixes.Patch. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix + resource to patch. + + This corresponds to the ``public_delegated_prefix`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + This corresponds to the ``public_delegated_prefix_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, public_delegated_prefix, public_delegated_prefix_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchPublicDelegatedPrefixeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.PatchPublicDelegatedPrefixeRequest): + request = compute.PatchPublicDelegatedPrefixeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if public_delegated_prefix is not None: + request.public_delegated_prefix = public_delegated_prefix + if public_delegated_prefix_resource is not None: + request.public_delegated_prefix_resource = public_delegated_prefix_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "PublicDelegatedPrefixesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/pagers.py new file mode 100644 index 000000000..aeb157045 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.PublicDelegatedPrefixAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.PublicDelegatedPrefixAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.PublicDelegatedPrefixAggregatedList], + request: compute.AggregatedListPublicDelegatedPrefixesRequest, + response: compute.PublicDelegatedPrefixAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListPublicDelegatedPrefixesRequest): + The initial request object. + response (google.cloud.compute_v1.types.PublicDelegatedPrefixAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListPublicDelegatedPrefixesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.PublicDelegatedPrefixAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.PublicDelegatedPrefixesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.PublicDelegatedPrefixesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.PublicDelegatedPrefixList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.PublicDelegatedPrefixList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.PublicDelegatedPrefixList], + request: compute.ListPublicDelegatedPrefixesRequest, + response: compute.PublicDelegatedPrefixList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListPublicDelegatedPrefixesRequest): + The initial request object. + response (google.cloud.compute_v1.types.PublicDelegatedPrefixList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListPublicDelegatedPrefixesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.PublicDelegatedPrefixList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.PublicDelegatedPrefix]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/__init__.py new file mode 100644 index 000000000..3ef55d242 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import PublicDelegatedPrefixesTransport +from .rest import PublicDelegatedPrefixesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict()  # type: Dict[str, Type[PublicDelegatedPrefixesTransport]]
+# Only the REST transport is registered for this service; no gRPC transport
+# is generated here, so 'rest' is the sole key in the registry.
+_transport_registry['rest'] = PublicDelegatedPrefixesRestTransport
+
+__all__ = (
+    'PublicDelegatedPrefixesTransport',
+    'PublicDelegatedPrefixesRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/base.py
new file mode 100644
index 000000000..91e25c931
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/base.py
@@ -0,0 +1,217 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class PublicDelegatedPrefixesTransport(abc.ABC): + """Abstract transport class for PublicDelegatedPrefixes.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListPublicDelegatedPrefixesRequest], + Union[ + compute.PublicDelegatedPrefixAggregatedList, + Awaitable[compute.PublicDelegatedPrefixAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeletePublicDelegatedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetPublicDelegatedPrefixeRequest], + Union[ + compute.PublicDelegatedPrefix, + Awaitable[compute.PublicDelegatedPrefix] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertPublicDelegatedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListPublicDelegatedPrefixesRequest], + Union[ + compute.PublicDelegatedPrefixList, + 
Awaitable[compute.PublicDelegatedPrefixList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchPublicDelegatedPrefixeRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'PublicDelegatedPrefixesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/rest.py new file mode 100644 index 000000000..df0deddaf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/public_delegated_prefixes/transports/rest.py @@ -0,0 +1,770 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import PublicDelegatedPrefixesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class PublicDelegatedPrefixesRestTransport(PublicDelegatedPrefixesTransport): + """REST backend transport for PublicDelegatedPrefixes. + + The PublicDelegatedPrefixes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListPublicDelegatedPrefixesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.PublicDelegatedPrefixAggregatedList: + r"""Call the aggregated list method over HTTP. 
+ + Args: + request (~.compute.AggregatedListPublicDelegatedPrefixesRequest): + The request object. A request message for + PublicDelegatedPrefixes.AggregatedList. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.PublicDelegatedPrefixAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/publicDelegatedPrefixes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListPublicDelegatedPrefixesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListPublicDelegatedPrefixesRequest.to_json( + compute.AggregatedListPublicDelegatedPrefixesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
def _delete(self,
        request: compute.DeletePublicDelegatedPrefixeRequest, *,
        retry: OptionalRetry=gapic_v1.method.DEFAULT,
        timeout: float=None,
        metadata: Sequence[Tuple[str, str]]=(),
        ) -> compute.Operation:
    r"""Call the delete method over HTTP.

    Args:
        request (~.compute.DeletePublicDelegatedPrefixeRequest):
            The request object. A request message for
            PublicDelegatedPrefixes.Delete. See the method
            description for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.Operation:
            Represents an Operation resource, used to manage
            asynchronous API requests. Operations can be global,
            regional or zonal; this call yields a regional operation.
    """
    # Static REST descriptor for this method.
    http_options = [{
        'method': 'delete',
        'uri': '/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes/{public_delegated_prefix}',
    }]

    # (snake_case_name, camelCase_name) pairs for required fields that
    # must survive into the query string even when left at a default.
    required_fields = [
        ("project", "project"),
        ("public_delegated_prefix", "publicDelegatedPrefix"),
        ("region", "region"),
    ]

    # Fold the request message into the URI template.
    transcoded = path_template.transcode(
        http_options, **compute.DeletePublicDelegatedPrefixeRequest.to_dict(request))
    uri = transcoded['uri']
    http_method = transcoded['method']

    # Serialize the leftover fields as query parameters.
    query_params = json.loads(compute.DeletePublicDelegatedPrefixeRequest.to_json(
        compute.DeletePublicDelegatedPrefixeRequest(transcoded['query_params']),
        including_default_value_fields=False,
        use_integers_for_enums=False,
    ))

    # Re-add required fields whose default values were dropped by to_json.
    original_params = transcoded['query_params']
    for snake_name, camel_name in required_fields:
        if snake_name in original_params and camel_name not in query_params:
            query_params[camel_name] = original_params[snake_name]

    # Issue the HTTP request.
    headers = {**dict(metadata), 'Content-Type': 'application/json'}
    response = getattr(self._session, http_method)(
        # TODO: replace with proper schema configuration (http/https) logic
        f"https://{self._host}{uri}",
        timeout=timeout,
        headers=headers,
        params=rest_helpers.flatten_query_params(query_params),
    )

    # Surface HTTP errors as the matching GoogleAPICallError subclass.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)

    # Deserialize and return the response body.
    return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def _get(self,
        request: compute.GetPublicDelegatedPrefixeRequest, *,
        retry: OptionalRetry=gapic_v1.method.DEFAULT,
        timeout: float=None,
        metadata: Sequence[Tuple[str, str]]=(),
        ) -> compute.PublicDelegatedPrefix:
    r"""Call the get method over HTTP.

    Args:
        request (~.compute.GetPublicDelegatedPrefixeRequest):
            The request object. A request message for
            PublicDelegatedPrefixes.Get. See the method description
            for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.PublicDelegatedPrefix:
            A PublicDelegatedPrefix resource represents an IP block
            within a PublicAdvertisedPrefix that is configured within
            a single cloud scope (global or region). IPs in the block
            can be allocated to resources within that scope.
    """
    # Static REST descriptor for this method.
    http_options = [{
        'method': 'get',
        'uri': '/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes/{public_delegated_prefix}',
    }]

    # (snake_case_name, camelCase_name) pairs for required fields that
    # must survive into the query string even when left at a default.
    required_fields = [
        ("project", "project"),
        ("public_delegated_prefix", "publicDelegatedPrefix"),
        ("region", "region"),
    ]

    # Fold the request message into the URI template.
    transcoded = path_template.transcode(
        http_options, **compute.GetPublicDelegatedPrefixeRequest.to_dict(request))
    uri = transcoded['uri']
    http_method = transcoded['method']

    # Serialize the leftover fields as query parameters.
    query_params = json.loads(compute.GetPublicDelegatedPrefixeRequest.to_json(
        compute.GetPublicDelegatedPrefixeRequest(transcoded['query_params']),
        including_default_value_fields=False,
        use_integers_for_enums=False,
    ))

    # Re-add required fields whose default values were dropped by to_json.
    original_params = transcoded['query_params']
    for snake_name, camel_name in required_fields:
        if snake_name in original_params and camel_name not in query_params:
            query_params[camel_name] = original_params[snake_name]

    # Issue the HTTP request.
    headers = {**dict(metadata), 'Content-Type': 'application/json'}
    response = getattr(self._session, http_method)(
        # TODO: replace with proper schema configuration (http/https) logic
        f"https://{self._host}{uri}",
        timeout=timeout,
        headers=headers,
        params=rest_helpers.flatten_query_params(query_params),
    )

    # Surface HTTP errors as the matching GoogleAPICallError subclass.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)

    # Deserialize and return the response body.
    return compute.PublicDelegatedPrefix.from_json(response.content, ignore_unknown_fields=True)
def _insert(self,
        request: compute.InsertPublicDelegatedPrefixeRequest, *,
        retry: OptionalRetry=gapic_v1.method.DEFAULT,
        timeout: float=None,
        metadata: Sequence[Tuple[str, str]]=(),
        ) -> compute.Operation:
    r"""Call the insert method over HTTP.

    Args:
        request (~.compute.InsertPublicDelegatedPrefixeRequest):
            The request object. A request message for
            PublicDelegatedPrefixes.Insert. See the method
            description for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.Operation:
            Represents an Operation resource, used to manage
            asynchronous API requests. Operations can be global,
            regional or zonal; this call yields a regional operation.
    """
    # Static REST descriptor: POST with the resource as the request body.
    http_options = [{
        'method': 'post',
        'uri': '/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes',
        'body': 'public_delegated_prefix_resource',
    }]

    # (snake_case_name, camelCase_name) pairs for required fields that
    # must survive into the query string even when left at a default.
    required_fields = [
        ("project", "project"),
        ("region", "region"),
    ]

    # Fold the request message into the URI template.
    transcoded = path_template.transcode(
        http_options, **compute.InsertPublicDelegatedPrefixeRequest.to_dict(request))

    # Serialize the request body.
    body = compute.PublicDelegatedPrefix.to_json(
        compute.PublicDelegatedPrefix(transcoded['body']),
        including_default_value_fields=False,
        use_integers_for_enums=False,
    )
    uri = transcoded['uri']
    http_method = transcoded['method']

    # Serialize the leftover fields as query parameters.
    query_params = json.loads(compute.InsertPublicDelegatedPrefixeRequest.to_json(
        compute.InsertPublicDelegatedPrefixeRequest(transcoded['query_params']),
        including_default_value_fields=False,
        use_integers_for_enums=False,
    ))

    # Re-add required fields whose default values were dropped by to_json.
    original_params = transcoded['query_params']
    for snake_name, camel_name in required_fields:
        if snake_name in original_params and camel_name not in query_params:
            query_params[camel_name] = original_params[snake_name]

    # Issue the HTTP request.
    headers = {**dict(metadata), 'Content-Type': 'application/json'}
    response = getattr(self._session, http_method)(
        # TODO: replace with proper schema configuration (http/https) logic
        f"https://{self._host}{uri}",
        timeout=timeout,
        headers=headers,
        params=rest_helpers.flatten_query_params(query_params),
        data=body,
    )

    # Surface HTTP errors as the matching GoogleAPICallError subclass.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)

    # Deserialize and return the response body.
    return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
def _list(self,
        request: compute.ListPublicDelegatedPrefixesRequest, *,
        retry: OptionalRetry=gapic_v1.method.DEFAULT,
        timeout: float=None,
        metadata: Sequence[Tuple[str, str]]=(),
        ) -> compute.PublicDelegatedPrefixList:
    r"""Call the list method over HTTP.

    Args:
        request (~.compute.ListPublicDelegatedPrefixesRequest):
            The request object. A request message for
            PublicDelegatedPrefixes.List. See the method description
            for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.PublicDelegatedPrefixList:
            The list of PublicDelegatedPrefix resources in the
            requested project and region.
    """
    # Static REST descriptor for this method.
    http_options = [{
        'method': 'get',
        'uri': '/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes',
    }]

    # (snake_case_name, camelCase_name) pairs for required fields that
    # must survive into the query string even when left at a default.
    required_fields = [
        ("project", "project"),
        ("region", "region"),
    ]

    # Fold the request message into the URI template.
    transcoded = path_template.transcode(
        http_options, **compute.ListPublicDelegatedPrefixesRequest.to_dict(request))
    uri = transcoded['uri']
    http_method = transcoded['method']

    # Serialize the leftover fields as query parameters.
    query_params = json.loads(compute.ListPublicDelegatedPrefixesRequest.to_json(
        compute.ListPublicDelegatedPrefixesRequest(transcoded['query_params']),
        including_default_value_fields=False,
        use_integers_for_enums=False,
    ))

    # Re-add required fields whose default values were dropped by to_json.
    original_params = transcoded['query_params']
    for snake_name, camel_name in required_fields:
        if snake_name in original_params and camel_name not in query_params:
            query_params[camel_name] = original_params[snake_name]

    # Issue the HTTP request.
    headers = {**dict(metadata), 'Content-Type': 'application/json'}
    response = getattr(self._session, http_method)(
        # TODO: replace with proper schema configuration (http/https) logic
        f"https://{self._host}{uri}",
        timeout=timeout,
        headers=headers,
        params=rest_helpers.flatten_query_params(query_params),
    )

    # Surface HTTP errors as the matching GoogleAPICallError subclass.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)

    # Deserialize and return the response body.
    return compute.PublicDelegatedPrefixList.from_json(response.content, ignore_unknown_fields=True)
def _patch(self,
        request: compute.PatchPublicDelegatedPrefixeRequest, *,
        retry: OptionalRetry=gapic_v1.method.DEFAULT,
        timeout: float=None,
        metadata: Sequence[Tuple[str, str]]=(),
        ) -> compute.Operation:
    r"""Call the patch method over HTTP.

    Args:
        request (~.compute.PatchPublicDelegatedPrefixeRequest):
            The request object. A request message for
            PublicDelegatedPrefixes.Patch. See the method
            description for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.Operation:
            Represents an Operation resource, used to manage
            asynchronous API requests. Operations can be global,
            regional or zonal; this call yields a regional operation.
    """
    # Static REST descriptor: PATCH with the resource as the request body.
    http_options = [{
        'method': 'patch',
        'uri': '/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes/{public_delegated_prefix}',
        'body': 'public_delegated_prefix_resource',
    }]

    # (snake_case_name, camelCase_name) pairs for required fields that
    # must survive into the query string even when left at a default.
    required_fields = [
        ("project", "project"),
        ("public_delegated_prefix", "publicDelegatedPrefix"),
        ("region", "region"),
    ]

    # Fold the request message into the URI template.
    transcoded = path_template.transcode(
        http_options, **compute.PatchPublicDelegatedPrefixeRequest.to_dict(request))

    # Serialize the request body.
    body = compute.PublicDelegatedPrefix.to_json(
        compute.PublicDelegatedPrefix(transcoded['body']),
        including_default_value_fields=False,
        use_integers_for_enums=False,
    )
    uri = transcoded['uri']
    http_method = transcoded['method']

    # Serialize the leftover fields as query parameters.
    query_params = json.loads(compute.PatchPublicDelegatedPrefixeRequest.to_json(
        compute.PatchPublicDelegatedPrefixeRequest(transcoded['query_params']),
        including_default_value_fields=False,
        use_integers_for_enums=False,
    ))

    # Re-add required fields whose default values were dropped by to_json.
    original_params = transcoded['query_params']
    for snake_name, camel_name in required_fields:
        if snake_name in original_params and camel_name not in query_params:
            query_params[camel_name] = original_params[snake_name]

    # Issue the HTTP request.
    headers = {**dict(metadata), 'Content-Type': 'application/json'}
    response = getattr(self._session, http_method)(
        # TODO: replace with proper schema configuration (http/https) logic
        f"https://{self._host}{uri}",
        timeout=timeout,
        headers=headers,
        params=rest_helpers.flatten_query_params(query_params),
        data=body,
    )

    # Surface HTTP errors as the matching GoogleAPICallError subclass.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)

    # Deserialize and return the response body.
    return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
# Read-only accessors that expose each private handler as the public
# callable the generated client layer dispatches to.

@property
def aggregated_list(self) -> Callable[
        [compute.AggregatedListPublicDelegatedPrefixesRequest],
        compute.PublicDelegatedPrefixAggregatedList]:
    """The callable handling AggregatedList requests."""
    return self._aggregated_list

@property
def delete(self) -> Callable[
        [compute.DeletePublicDelegatedPrefixeRequest],
        compute.Operation]:
    """The callable handling Delete requests."""
    return self._delete

@property
def get(self) -> Callable[
        [compute.GetPublicDelegatedPrefixeRequest],
        compute.PublicDelegatedPrefix]:
    """The callable handling Get requests."""
    return self._get

@property
def insert(self) -> Callable[
        [compute.InsertPublicDelegatedPrefixeRequest],
        compute.Operation]:
    """The callable handling Insert requests."""
    return self._insert

@property
def list(self) -> Callable[
        [compute.ListPublicDelegatedPrefixesRequest],
        compute.PublicDelegatedPrefixList]:
    """The callable handling List requests."""
    return self._list

@property
def patch(self) -> Callable[
        [compute.PatchPublicDelegatedPrefixeRequest],
        compute.Operation]:
    """The callable handling Patch requests."""
    return self._patch

def close(self):
    """Release the underlying HTTP session."""
    self._session.close()


__all__ = (
    'PublicDelegatedPrefixesRestTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Public entry point for the RegionAutoscalers service package."""
from .client import RegionAutoscalersClient

__all__ = ("RegionAutoscalersClient",)
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_autoscalers import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionAutoscalersTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionAutoscalersRestTransport + + +class RegionAutoscalersClientMeta(type): + """Metaclass for the RegionAutoscalers client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionAutoscalersTransport]] + _transport_registry["rest"] = RegionAutoscalersRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionAutoscalersTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
    """Converts api endpoint to mTLS endpoint.

    Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
    "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

    Args:
        api_endpoint (Optional[str]): the api endpoint to convert.
    Returns:
        str: converted mTLS api endpoint.
    """
    # Empty / None endpoints are passed through untouched.
    if not api_endpoint:
        return api_endpoint

    # FIX: the staged text had lost the named capture groups (it read
    # "(?P[^.]+)" etc., which is not valid regex syntax and would raise
    # re.error). Restored the four named groups that the m.groups()
    # unpacking below depends on.
    mtls_endpoint_re = re.compile(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
    )

    m = mtls_endpoint_re.match(api_endpoint)
    name, mtls, sandbox, googledomain = m.groups()
    # Already mTLS, or not a googleapis.com domain: nothing to rewrite.
    if mtls or not googledomain:
        return api_endpoint

    if sandbox:
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )

    return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        RegionAutoscalersClient: The constructed client.
    """
    # Build credentials from the key file and hand everything to __init__.
    kwargs["credentials"] = service_account.Credentials.from_service_account_file(filename)
    return cls(*args, **kwargs)

# Alias kept for backward compatibility with older call sites.
from_service_account_json = from_service_account_file
# Canonical resource-path builders and parsers shared by all generated
# clients. FIX: the staged text had lost the named capture groups from
# every parse_* regex (e.g. "(?P.+?)" instead of "(?P<billing_account>.+?)",
# which is invalid regex syntax); the group names are restored below.

@staticmethod
def common_billing_account_path(billing_account: str, ) -> str:
    """Returns a fully-qualified billing_account string."""
    return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
    """Parse a billing_account path into its component segments."""
    m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_folder_path(folder: str, ) -> str:
    """Returns a fully-qualified folder string."""
    return "folders/{folder}".format(folder=folder, )

@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
    """Parse a folder path into its component segments."""
    m = re.match(r"^folders/(?P<folder>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_organization_path(organization: str, ) -> str:
    """Returns a fully-qualified organization string."""
    return "organizations/{organization}".format(organization=organization, )

@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
    """Parse a organization path into its component segments."""
    m = re.match(r"^organizations/(?P<organization>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_project_path(project: str, ) -> str:
    """Returns a fully-qualified project string."""
    return "projects/{project}".format(project=project, )

@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
    """Parse a project path into its component segments."""
    m = re.match(r"^projects/(?P<project>.+?)$", path)
    return m.groupdict() if m else {}

@staticmethod
def common_location_path(project: str, location: str, ) -> str:
    """Returns a fully-qualified location string."""
    return "projects/{project}/locations/{location}".format(project=project, location=location, )
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
    """Parse a location path into its component segments.

    Returns an empty dict when ``path`` does not match the
    ``projects/{project}/locations/{location}`` shape.
    """
    # FIX: the staged text had lost the named capture groups (it read
    # "(?P.+?)", which is not valid regex syntax); restored
    # (?P<project>...) and (?P<location>...) so groupdict() yields the
    # expected keys.
    m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    return m.groupdict() if m else {}
def __init__(self, *,
        credentials: Optional[ga_credentials.Credentials] = None,
        transport: Union[str, RegionAutoscalersTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        ) -> None:
    """Instantiates the region autoscalers client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. If none are
            specified, the client attempts to ascertain credentials from
            the environment.
        transport (Union[str, RegionAutoscalersTransport]): The transport
            to use. If set to None, a transport is chosen automatically.
        client_options (google.api_core.client_options.ClientOptions):
            Custom options for the client; ignored when a ``transport``
            instance is provided. ``api_endpoint`` overrides the default
            endpoint. The GOOGLE_API_USE_MTLS_ENDPOINT environment
            variable ("always", "never", or the default "auto") selects
            the mTLS endpoint, and GOOGLE_API_USE_CLIENT_CERTIFICATE
            ("true"/"false") controls whether ``client_cert_source`` (or
            the default client certificate) is used for mutual TLS.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, default info is used. Generally
            only needed when developing your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS
            transport creation failed for any reason.
    """
    # Normalize client_options into a ClientOptions instance.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()

    # Validate, then read, the client-certificate toggle.
    cert_env = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
    if cert_env not in ("true", "false"):
        raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")

    # Resolve the client certificate source for mutual TLS, if enabled.
    client_cert_source_func = None
    is_mtls = False
    if cert_env == "true":
        if client_options.client_cert_source:
            is_mtls = True
            client_cert_source_func = client_options.client_cert_source
        else:
            is_mtls = mtls.has_default_client_cert_source()
            client_cert_source_func = mtls.default_client_cert_source() if is_mtls else None

    # Resolve the API endpoint: an explicit option wins; otherwise the
    # GOOGLE_API_USE_MTLS_ENDPOINT environment variable decides.
    if client_options.api_endpoint is not None:
        api_endpoint = client_options.api_endpoint
    else:
        use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
        if use_mtls_env == "never":
            api_endpoint = self.DEFAULT_ENDPOINT
        elif use_mtls_env == "always":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT
        elif use_mtls_env == "auto":
            api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
        else:
            raise MutualTLSChannelError(
                "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                "values: never, auto, always"
            )

    # Save or instantiate the transport. Accepting a ready-made transport
    # instance is an extensibility point for unusual situations.
    if isinstance(transport, RegionAutoscalersTransport):
        # A transport instance carries its own credentials and scopes.
        if credentials or client_options.credentials_file:
            raise ValueError("When providing a transport instance, "
                             "provide its credentials directly.")
        if client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = transport
    else:
        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials,
            credentials_file=client_options.credentials_file,
            host=api_endpoint,
            scopes=client_options.scopes,
            client_cert_source_for_mtls=client_cert_source_func,
            quota_project_id=client_options.quota_project_id,
            client_info=client_info,
            always_use_jwt_access=True,
        )
def delete(self,
        request: Union[compute.DeleteRegionAutoscalerRequest, dict] = None,
        *,
        project: str = None,
        region: str = None,
        autoscaler: str = None,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
        ) -> compute.Operation:
    r"""Deletes the specified autoscaler.

    Args:
        request (Union[google.cloud.compute_v1.types.DeleteRegionAutoscalerRequest, dict]):
            The request object. A request message for
            RegionAutoscalers.Delete. See the method description for
            details.
        project (str):
            Project ID for this request. Corresponds to the
            ``project`` field on ``request``; must not be set when
            ``request`` is provided.
        region (str):
            Name of the region scoping this request. Corresponds to
            the ``region`` field on ``request``; must not be set when
            ``request`` is provided.
        autoscaler (str):
            Name of the autoscaler to delete. Corresponds to the
            ``autoscaler`` field on ``request``; must not be set when
            ``request`` is provided.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.compute_v1.types.Operation:
            Represents an Operation resource
            ([Global](/compute/docs/reference/rest/v1/globalOperations),
            [Regional](/compute/docs/reference/rest/v1/regionOperations) or
            [Zonal](/compute/docs/reference/rest/v1/zoneOperations)),
            used to manage asynchronous API requests; this call yields
            a regional operation.
    """
    # A request object and flattened field arguments are mutually
    # exclusive ways to specify the call.
    if request is not None and any([project, region, autoscaler]):
        raise ValueError('If the `request` argument is set, then none of '
                         'the individual field arguments should be set.')

    # Coerce dicts (or None) into a proto-plus request object. When the
    # caller already passed one, keep it as-is: we have verified above
    # that no flattened fields would mutate it.
    if not isinstance(request, compute.DeleteRegionAutoscalerRequest):
        request = compute.DeleteRegionAutoscalerRequest(request)
        # Apply any flattened field arguments to the request.
        if project is not None:
            request.project = project
        if region is not None:
            request.region = region
        if autoscaler is not None:
            request.autoscaler = autoscaler

    # The wrapped method adds retry, timeout, and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.delete]

    # Send the request and return the resulting operation.
    return rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Autoscaler: + Represents an Autoscaler resource. Google Compute Engine + has two Autoscaler resources: \* + [Zonal](/compute/docs/reference/rest/v1/autoscalers) \* + [Regional](/compute/docs/reference/rest/v1/regionAutoscalers) + Use autoscalers to automatically add or delete instances + from a managed instance group according to your defined + autoscaling policy. For more information, read + Autoscaling Groups of Instances. For zonal managed + instance groups resource, use the autoscaler resource. + For regional managed instance groups, use the + regionAutoscalers resource. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, autoscaler]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionAutoscalerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionAutoscalerRequest): + request = compute.GetRegionAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if autoscaler is not None: + request.autoscaler = autoscaler + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionAutoscalerRequest, dict] = None, + *, + project: str = None, + region: str = None, + autoscaler_resource: compute.Autoscaler = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates an autoscaler in the specified project using + the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionAutoscalerRequest, dict]): + The request object. A request message for + RegionAutoscalers.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + This corresponds to the ``autoscaler_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, autoscaler_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionAutoscalerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionAutoscalerRequest): + request = compute.InsertRegionAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if autoscaler_resource is not None: + request.autoscaler_resource = autoscaler_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionAutoscalersRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of autoscalers contained within the + specified region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionAutoscalersRequest, dict]): + The request object. A request message for + RegionAutoscalers.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_autoscalers.pagers.ListPager: + Contains a list of autoscalers. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionAutoscalersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionAutoscalersRequest): + request = compute.ListRegionAutoscalersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchRegionAutoscalerRequest, dict] = None, + *, + project: str = None, + region: str = None, + autoscaler_resource: compute.Autoscaler = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates an autoscaler in the specified project using + the data included in the request. This method supports + PATCH semantics and uses the JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchRegionAutoscalerRequest, dict]): + The request object. 
A request message for + RegionAutoscalers.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + This corresponds to the ``autoscaler_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, autoscaler_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchRegionAutoscalerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchRegionAutoscalerRequest): + request = compute.PatchRegionAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if autoscaler_resource is not None: + request.autoscaler_resource = autoscaler_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateRegionAutoscalerRequest, dict] = None, + *, + project: str = None, + region: str = None, + autoscaler_resource: compute.Autoscaler = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates an autoscaler in the specified project using + the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateRegionAutoscalerRequest, dict]): + The request object. A request message for + RegionAutoscalers.Update. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + This corresponds to the ``autoscaler_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, autoscaler_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateRegionAutoscalerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateRegionAutoscalerRequest): + request = compute.UpdateRegionAutoscalerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if autoscaler_resource is not None: + request.autoscaler_resource = autoscaler_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionAutoscalersClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/pagers.py new file mode 100644 index 000000000..278821b51 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionAutoscalerList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.RegionAutoscalerList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionAutoscalerList], + request: compute.ListRegionAutoscalersRequest, + response: compute.RegionAutoscalerList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionAutoscalersRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionAutoscalerList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionAutoscalersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionAutoscalerList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Autoscaler]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/__init__.py new file mode 100644 index 000000000..6b15dbc88 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/__init__.py @@ 
-0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionAutoscalersTransport +from .rest import RegionAutoscalersRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionAutoscalersTransport]] +_transport_registry['rest'] = RegionAutoscalersRestTransport + +__all__ = ( + 'RegionAutoscalersTransport', + 'RegionAutoscalersRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/base.py new file mode 100644 index 000000000..685022b17 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/base.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionAutoscalersTransport(abc.ABC): + """Abstract transport class for RegionAutoscalers.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionAutoscalerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionAutoscalerRequest], + Union[ + compute.Autoscaler, + Awaitable[compute.Autoscaler] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionAutoscalerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionAutoscalersRequest], + Union[ + compute.RegionAutoscalerList, + Awaitable[compute.RegionAutoscalerList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchRegionAutoscalerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateRegionAutoscalerRequest], + Union[ + 
compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionAutoscalersTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/rest.py new file mode 100644 index 000000000..d7897dddd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_autoscalers/transports/rest.py @@ -0,0 +1,797 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionAutoscalersTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionAutoscalersRestTransport(RegionAutoscalersTransport): + """REST backend transport for RegionAutoscalers. + + The RegionAutoscalers API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRegionAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionAutoscalerRequest): + The request object. A request message for + RegionAutoscalers.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/autoscalers/{autoscaler}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "autoscaler", + "autoscaler" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteRegionAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionAutoscalerRequest.to_json( + compute.DeleteRegionAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Autoscaler: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionAutoscalerRequest): + The request object. A request message for + RegionAutoscalers.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Autoscaler: + Represents an Autoscaler resource. Google Compute Engine + has two Autoscaler resources: \* + `Zonal `__ + \* + `Regional `__ + Use autoscalers to automatically add or delete instances + from a managed instance group according to your defined + autoscaling policy. For more information, read + Autoscaling Groups of Instances. 
For zonal managed + instance groups resource, use the autoscaler resource. + For regional managed instance groups, use the + regionAutoscalers resource. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/autoscalers/{autoscaler}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "autoscaler", + "autoscaler" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionAutoscalerRequest.to_json( + compute.GetRegionAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Autoscaler.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionAutoscalerRequest): + The request object. A request message for + RegionAutoscalers.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/autoscalers', + 'body': 'autoscaler_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Autoscaler.to_json( + compute.Autoscaler( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionAutoscalerRequest.to_json( + compute.InsertRegionAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionAutoscalersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RegionAutoscalerList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionAutoscalersRequest): + The request object. A request message for + RegionAutoscalers.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RegionAutoscalerList: + Contains a list of autoscalers. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/autoscalers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionAutoscalersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionAutoscalersRequest.to_json( + compute.ListRegionAutoscalersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RegionAutoscalerList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchRegionAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchRegionAutoscalerRequest): + The request object. A request message for + RegionAutoscalers.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/autoscalers', + 'body': 'autoscaler_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchRegionAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Autoscaler.to_json( + compute.Autoscaler( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchRegionAutoscalerRequest.to_json( + compute.PatchRegionAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateRegionAutoscalerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateRegionAutoscalerRequest): + The request object. A request message for + RegionAutoscalers.Update. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/regions/{region}/autoscalers', + 'body': 'autoscaler_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.UpdateRegionAutoscalerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Autoscaler.to_json( + compute.Autoscaler( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateRegionAutoscalerRequest.to_json( + compute.UpdateRegionAutoscalerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionAutoscalerRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionAutoscalerRequest], + compute.Autoscaler]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRegionAutoscalerRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionAutoscalersRequest], + compute.RegionAutoscalerList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchRegionAutoscalerRequest], + compute.Operation]: + return self._patch + @ property + def update(self) -> Callable[ + [compute.UpdateRegionAutoscalerRequest], + compute.Operation]: + return self._update + def close(self): + self._session.close() + + +__all__=( + 'RegionAutoscalersRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/__init__.py 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/__init__.py new file mode 100644 index 000000000..2e88fa79f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionBackendServicesClient + +__all__ = ( + 'RegionBackendServicesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/client.py new file mode 100644 index 000000000..c28526723 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/client.py @@ -0,0 +1,1058 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_backend_services import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionBackendServicesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionBackendServicesRestTransport + + +class RegionBackendServicesClientMeta(type): + """Metaclass for the RegionBackendServices client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionBackendServicesTransport]] + _transport_registry["rest"] = RegionBackendServicesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionBackendServicesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class RegionBackendServicesClient(metaclass=RegionBackendServicesClientMeta): + """The RegionBackendServices API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionBackendServicesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionBackendServicesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionBackendServicesTransport: + """Returns the transport used by the client instance. + + Returns: + RegionBackendServicesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionBackendServicesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region backend services client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionBackendServicesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionBackendServicesTransport): + # transport is a RegionBackendServicesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionBackendServiceRequest, dict] = None, + *, + project: str = None, + region: str = None, + backend_service: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified regional BackendService + resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionBackendServiceRequest, dict]): + The request object. A request message for + RegionBackendServices.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to delete. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, backend_service]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionBackendServiceRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionBackendServiceRequest): + request = compute.DeleteRegionBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if backend_service is not None: + request.backend_service = backend_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionBackendServiceRequest, dict] = None, + *, + project: str = None, + region: str = None, + backend_service: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.BackendService: + r"""Returns the specified regional BackendService + resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionBackendServiceRequest, dict]): + The request object. A request message for + RegionBackendServices.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to return. 
+ + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.BackendService: + Represents a Backend Service resource. A backend service + defines how Google Cloud load balancers distribute + traffic. The backend service configuration contains a + set of values, such as the protocol used to connect to + backends, various distribution and session settings, + health checks, and timeouts. These settings provide + fine-grained control over how your load balancer + behaves. Most of the settings have default values that + allow for easy configuration if you need to get started + quickly. Backend services in Google Compute Engine can + be either regionally or globally scoped. \* + [Global](https://cloud.google.com/compute/docs/reference/rest/v1/backendServices) + \* + [Regional](https://cloud.google.com/compute/docs/reference/rest/v1/regionBackendServices) + For more information, see Backend Services. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, backend_service]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetRegionBackendServiceRequest): + request = compute.GetRegionBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if backend_service is not None: + request.backend_service = backend_service + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_health(self, + request: Union[compute.GetHealthRegionBackendServiceRequest, dict] = None, + *, + project: str = None, + region: str = None, + backend_service: str = None, + resource_group_reference_resource: compute.ResourceGroupReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.BackendServiceGroupHealth: + r"""Gets the most recent health check results for this + regional BackendService. + + Args: + request (Union[google.cloud.compute_v1.types.GetHealthRegionBackendServiceRequest, dict]): + The request object. A request message for + RegionBackendServices.GetHealth. See the method + description for details. + project (str): + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + for which to get health. 
+ + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_group_reference_resource (google.cloud.compute_v1.types.ResourceGroupReference): + The body resource for this request + This corresponds to the ``resource_group_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.BackendServiceGroupHealth: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, backend_service, resource_group_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetHealthRegionBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetHealthRegionBackendServiceRequest): + request = compute.GetHealthRegionBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if backend_service is not None: + request.backend_service = backend_service + if resource_group_reference_resource is not None: + request.resource_group_reference_resource = resource_group_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_health] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionBackendServiceRequest, dict] = None, + *, + project: str = None, + region: str = None, + backend_service_resource: compute.BackendService = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a regional BackendService resource in the + specified project using the data included in the + request. For more information, see Backend services + overview. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionBackendServiceRequest, dict]): + The request object. A request message for + RegionBackendServices.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + This corresponds to the ``backend_service_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, backend_service_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertRegionBackendServiceRequest): + request = compute.InsertRegionBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if backend_service_resource is not None: + request.backend_service_resource = backend_service_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionBackendServicesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of regional BackendService + resources available to the specified project in the + given region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionBackendServicesRequest, dict]): + The request object. A request message for + RegionBackendServices.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_backend_services.pagers.ListPager: + Contains a list of BackendService + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionBackendServicesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionBackendServicesRequest): + request = compute.ListRegionBackendServicesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def patch(self, + request: Union[compute.PatchRegionBackendServiceRequest, dict] = None, + *, + project: str = None, + region: str = None, + backend_service: str = None, + backend_service_resource: compute.BackendService = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified regional BackendService + resource with the data included in the request. For more + information, see Understanding backend services This + method supports PATCH semantics and uses the JSON merge + patch format and processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchRegionBackendServiceRequest, dict]): + The request object. A request message for + RegionBackendServices.Patch. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to patch. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + This corresponds to the ``backend_service_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, backend_service, backend_service_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchRegionBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchRegionBackendServiceRequest): + request = compute.PatchRegionBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if backend_service is not None: + request.backend_service = backend_service + if backend_service_resource is not None: + request.backend_service_resource = backend_service_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateRegionBackendServiceRequest, dict] = None, + *, + project: str = None, + region: str = None, + backend_service: str = None, + backend_service_resource: compute.BackendService = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified regional BackendService + resource with the data included in the request. For more + information, see Backend services overview . + + Args: + request (Union[google.cloud.compute_v1.types.UpdateRegionBackendServiceRequest, dict]): + The request object. A request message for + RegionBackendServices.Update. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backend_service (str): + Name of the BackendService resource + to update. + + This corresponds to the ``backend_service`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + This corresponds to the ``backend_service_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, backend_service, backend_service_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateRegionBackendServiceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.UpdateRegionBackendServiceRequest): + request = compute.UpdateRegionBackendServiceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if backend_service is not None: + request.backend_service = backend_service + if backend_service_resource is not None: + request.backend_service_resource = backend_service_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionBackendServicesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/pagers.py new file mode 100644 index 000000000..15c5dff54 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.BackendServiceList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
class ListPager:
    """A pager for iterating through ``list`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.BackendServiceList` object and
    exposes an ``__iter__`` method over its ``items`` field. When further
    pages exist, iteration transparently issues additional ``List``
    requests and continues yielding their ``items``.

    All the usual :class:`google.cloud.compute_v1.types.BackendServiceList`
    attributes are available on the pager. If multiple requests are made,
    only the most recent response is retained, and thus used for attribute
    lookup.
    """

    def __init__(self,
            method: Callable[..., compute.BackendServiceList],
            request: compute.ListRegionBackendServicesRequest,
            response: compute.BackendServiceList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListRegionBackendServicesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.BackendServiceList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page-token mutation does not affect the caller.
        self._request = compute.ListRegionBackendServicesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute access to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.BackendServiceList]:
        """Yield each response page, fetching subsequent pages on demand."""
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[compute.BackendService]:
        return (item for page in self.pages for item in page.items)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import RegionBackendServicesTransport
from .rest import RegionBackendServicesRestTransport


# Registry mapping transport names to their implementations; REST is the
# only transport this service supports.
_transport_registry: Dict[str, Type[RegionBackendServicesTransport]] = OrderedDict(
    rest=RegionBackendServicesRestTransport,
)

__all__ = (
    'RegionBackendServicesTransport',
    'RegionBackendServicesRestTransport',
)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionBackendServicesTransport(abc.ABC): + """Abstract transport class for RegionBackendServices.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_health: gapic_v1.method.wrap_method( + self.get_health, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionBackendServiceRequest], + Union[ + compute.BackendService, + Awaitable[compute.BackendService] + ]]: + raise NotImplementedError() + + @property + def get_health(self) -> Callable[ + [compute.GetHealthRegionBackendServiceRequest], + Union[ + compute.BackendServiceGroupHealth, + Awaitable[compute.BackendServiceGroupHealth] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionBackendServicesRequest], + Union[ + compute.BackendServiceList, + Awaitable[compute.BackendServiceList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchRegionBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateRegionBackendServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionBackendServicesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/transports/rest.py new file mode 100644 index 000000000..149b5e8b2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_backend_services/transports/rest.py @@ -0,0 +1,917 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore 
+from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionBackendServicesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionBackendServicesRestTransport(RegionBackendServicesTransport): + """REST backend transport for RegionBackendServices. + + The RegionBackendServices API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRegionBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionBackendServiceRequest): + The request object. A request message for + RegionBackendServices.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteRegionBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionBackendServiceRequest.to_json( + compute.DeleteRegionBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.BackendService: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionBackendServiceRequest): + The request object. A request message for + RegionBackendServices.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.BackendService: + Represents a Backend Service resource. A backend service + defines how Google Cloud load balancers distribute + traffic. The backend service configuration contains a + set of values, such as the protocol used to connect to + backends, various distribution and session settings, + health checks, and timeouts. These settings provide + fine-grained control over how your load balancer + behaves. Most of the settings have default values that + allow for easy configuration if you need to get started + quickly. Backend services in Google Compute Engine can + be either regionally or globally scoped. \* + `Global `__ + \* + `Regional `__ + For more information, see Backend Services. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionBackendServiceRequest.to_json( + compute.GetRegionBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.BackendService.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_health(self, + request: compute.GetHealthRegionBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.BackendServiceGroupHealth: + r"""Call the get health method over HTTP. + + Args: + request (~.compute.GetHealthRegionBackendServiceRequest): + The request object. A request message for + RegionBackendServices.GetHealth. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.BackendServiceGroupHealth: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}/getHealth', + 'body': 'resource_group_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetHealthRegionBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ResourceGroupReference.to_json( + compute.ResourceGroupReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetHealthRegionBackendServiceRequest.to_json( + 
compute.GetHealthRegionBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.BackendServiceGroupHealth.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionBackendServiceRequest): + The request object. A request message for + RegionBackendServices.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/backendServices', + 'body': 'backend_service_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.BackendService.to_json( + compute.BackendService( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionBackendServiceRequest.to_json( + compute.InsertRegionBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionBackendServicesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.BackendServiceList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionBackendServicesRequest): + The request object. A request message for + RegionBackendServices.List. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.BackendServiceList: + Contains a list of BackendService + resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/backendServices', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionBackendServicesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionBackendServicesRequest.to_json( + compute.ListRegionBackendServicesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.BackendServiceList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchRegionBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchRegionBackendServiceRequest): + The request object. A request message for + RegionBackendServices.Patch. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}', + 'body': 'backend_service_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchRegionBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.BackendService.to_json( + compute.BackendService( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchRegionBackendServiceRequest.to_json( + compute.PatchRegionBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateRegionBackendServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateRegionBackendServiceRequest): + The request object. A request message for + RegionBackendServices.Update. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}', + 'body': 'backend_service_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "backend_service", + "backendService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.UpdateRegionBackendServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.BackendService.to_json( + compute.BackendService( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateRegionBackendServiceRequest.to_json( + compute.UpdateRegionBackendServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionBackendServiceRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionBackendServiceRequest], + compute.BackendService]: + return self._get + @ property + def get_health(self) -> Callable[ + [compute.GetHealthRegionBackendServiceRequest], + compute.BackendServiceGroupHealth]: + return self._get_health + @ property + def insert(self) -> Callable[ + [compute.InsertRegionBackendServiceRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionBackendServicesRequest], + compute.BackendServiceList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchRegionBackendServiceRequest], + compute.Operation]: + return self._patch + @ property + def update(self) -> Callable[ + [compute.UpdateRegionBackendServiceRequest], + compute.Operation]: + return self._update + def close(self): + self._session.close() + + +__all__=( + 'RegionBackendServicesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/__init__.py new file mode 100644 index 000000000..04dff2bd6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionCommitmentsClient + +__all__ = ( + 'RegionCommitmentsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/client.py new file mode 100644 index 000000000..edfb954b3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/client.py @@ -0,0 +1,706 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_commitments import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionCommitmentsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionCommitmentsRestTransport + + +class RegionCommitmentsClientMeta(type): + """Metaclass for the RegionCommitments client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionCommitmentsTransport]] + _transport_registry["rest"] = RegionCommitmentsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionCommitmentsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionCommitmentsClient(metaclass=RegionCommitmentsClientMeta):
+    """The RegionCommitments API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        # Named groups are required: the unpack below relies on the four
+        # groups (name, mtls, sandbox, googledomain) in this exact order.
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionCommitmentsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionCommitmentsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionCommitmentsTransport: + """Returns the transport used by the client instance. + + Returns: + RegionCommitmentsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionCommitmentsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region commitments client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionCommitmentsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionCommitmentsTransport): + # transport is a RegionCommitmentsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListRegionCommitmentsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of commitments by + region. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListRegionCommitmentsRequest, dict]): + The request object. A request message for + RegionCommitments.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_commitments.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListRegionCommitmentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListRegionCommitmentsRequest): + request = compute.AggregatedListRegionCommitmentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetRegionCommitmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + commitment: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Commitment: + r"""Returns the specified commitment resource. Gets a + list of available commitments by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionCommitmentRequest, dict]): + The request object. A request message for + RegionCommitments.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + commitment (str): + Name of the commitment to return. + This corresponds to the ``commitment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Commitment: + Represents a regional Commitment + resource. Creating a commitment resource + means that you are purchasing a + committed use contract with an explicit + start and end time. You can create + commitments based on vCPUs and memory + usage and receive discounted rates. For + full details, read Signing Up for + Committed Use Discounts. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, commitment]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionCommitmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionCommitmentRequest): + request = compute.GetRegionCommitmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if commitment is not None: + request.commitment = commitment + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionCommitmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + commitment_resource: compute.Commitment = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a commitment in the specified project using + the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionCommitmentRequest, dict]): + The request object. A request message for + RegionCommitments.Insert. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + commitment_resource (google.cloud.compute_v1.types.Commitment): + The body resource for this request + This corresponds to the ``commitment_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, commitment_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionCommitmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionCommitmentRequest): + request = compute.InsertRegionCommitmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if commitment_resource is not None: + request.commitment_resource = commitment_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionCommitmentsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of commitments contained within the + specified region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionCommitmentsRequest, dict]): + The request object. A request message for + RegionCommitments.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_commitments.pagers.ListPager: + Contains a list of Commitment + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionCommitmentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionCommitmentsRequest): + request = compute.ListRegionCommitmentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionCommitmentsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/pagers.py new file mode 100644 index 000000000..7fe8c1b1d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class AggregatedListPager:
    """Auto-paginating wrapper for ``aggregated_list`` responses.

    Wraps an initial :class:`google.cloud.compute_v1.types.CommitmentAggregatedList`
    and lazily fetches follow-up pages whenever iteration exhausts the
    current page's ``items`` map. Attribute access is delegated to the most
    recently fetched response, so all of its fields remain reachable here.
    """

    def __init__(self,
            method: Callable[..., compute.CommitmentAggregatedList],
            request: compute.AggregatedListRegionCommitmentsRequest,
            response: compute.CommitmentAggregatedList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The originally-called RPC method; reused to
                fetch subsequent pages.
            request (google.cloud.compute_v1.types.AggregatedListRegionCommitmentsRequest):
                The initial request object (copied so ``page_token`` can be
                advanced without mutating the caller's request).
            response (google.cloud.compute_v1.types.CommitmentAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = compute.AggregatedListRegionCommitmentsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Fall through to the latest response for any unknown attribute.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.CommitmentAggregatedList]:
        # Yield the page we already have, then keep following next_page_token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[Tuple[str, compute.CommitmentsScopedList]]:
        # Aggregated responses map scope name -> scoped list; iterate entries.
        for page in self.pages:
            for entry in page.items.items():
                yield entry

    def get(self, key: str) -> Optional[compute.CommitmentsScopedList]:
        # Direct lookup into the current page's items map (no page fetch).
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'


class ListPager:
    """Auto-paginating wrapper for ``list`` responses.

    Wraps an initial :class:`google.cloud.compute_v1.types.CommitmentList`
    and lazily fetches follow-up pages whenever iteration exhausts the
    current page's ``items`` field. Attribute access is delegated to the
    most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., compute.CommitmentList],
            request: compute.ListRegionCommitmentsRequest,
            response: compute.CommitmentList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The originally-called RPC method; reused to
                fetch subsequent pages.
            request (google.cloud.compute_v1.types.ListRegionCommitmentsRequest):
                The initial request object (copied so ``page_token`` can be
                advanced without mutating the caller's request).
            response (google.cloud.compute_v1.types.CommitmentList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        self._request = compute.ListRegionCommitmentsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Fall through to the latest response for any unknown attribute.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.CommitmentList]:
        # Yield the page we already have, then keep following next_page_token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[compute.Commitment]:
        for page in self.pages:
            for item in page.items:
                yield item

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionCommitmentsTransport +from .rest import RegionCommitmentsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionCommitmentsTransport]] +_transport_registry['rest'] = RegionCommitmentsRestTransport + +__all__ = ( + 'RegionCommitmentsTransport', + 'RegionCommitmentsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/transports/base.py new file mode 100644 index 000000000..136349be6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_commitments/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Derive the client-info user-agent from the installed distribution; fall
# back to a bare ClientInfo when the package is not pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class RegionCommitmentsTransport(abc.ABC):
    """Abstract transport class for RegionCommitments.

    Concrete transports (e.g. REST) subclass this; it owns credential
    resolution and declares the RPC surface as abstract properties.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Default to port 443 (HTTPS) when the caller gave no explicit port.
        self._host = host if ':' in host else host + ':443'

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Keep the caller-supplied scopes (may be None) for introspection.
        self._scopes = scopes

        # Resolve credentials: explicit object, file, or environment default.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )

        # Service-account credentials get the self-signed-JWT fast path when
        # requested and when the installed google-auth supports it.
        use_self_signed_jwt = (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        )
        if use_self_signed_jwt:
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Wrap every RPC with the standard gapic method wrapper so that
        # timeouts and client-info metadata are applied uniformly.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (
                self.aggregated_list,
                self.get,
                self.insert,
                self.list,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListRegionCommitmentsRequest],
            Union[
                compute.CommitmentAggregatedList,
                Awaitable[compute.CommitmentAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionCommitmentRequest],
            Union[
                compute.Commitment,
                Awaitable[compute.Commitment]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionCommitmentRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionCommitmentsRequest],
            Union[
                compute.CommitmentList,
                Awaitable[compute.CommitmentList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'RegionCommitmentsTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: the license header above was previously placed *below* the imports;
# it belongs at the top of the file.
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.types import compute

from .base import RegionCommitmentsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class RegionCommitmentsRestTransport(RegionCommitmentsTransport):
    """REST backend transport for RegionCommitments.

    The RegionCommitments API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1.
    All four RPCs share identical request plumbing (URI transcoding,
    query-param construction, HTTP dispatch, error mapping, response
    parsing), which is factored into the private ``_call_api`` helper.
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials=None,
            credentials_file: str=None,
            scopes: Sequence[str]=None,
            client_cert_source_for_mtls: Callable[[
                ], Tuple[bytes, bytes]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument
                is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate to configure mutual TLS HTTP channel. It is
                ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers, "http" can be
                specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also*
        # be set on the credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_api(self,
            request_cls,
            request,
            http_options,
            required_fields,
            response_cls,
            body_cls=None,
            timeout=None,
            metadata=(),
            ):
        """Shared HTTP plumbing for all RPCs of this transport.

        Transcodes ``request`` against ``http_options``, builds the query
        string, sends the request over the authorized session, maps HTTP
        errors to ``GoogleAPICallError`` subclasses, and parses the body
        into ``response_cls``.

        Args:
            request_cls: The proto-plus request class of ``request``.
            request: The request message instance.
            http_options: Transcoding rules (method/uri, optional body field).
            required_fields: ``(snake_case, camelCase)`` pairs that must
                survive into the query params even when they hold default
                values (to_json drops defaults).
            response_cls: The proto-plus class to parse the response into.
            body_cls: Proto-plus class of the HTTP body field, or ``None``
                for body-less methods.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata (become headers).

        Returns:
            An instance of ``response_cls`` parsed from the HTTP response.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body, if this method has one.
        body = None
        if body_cls is not None:
            body = body_cls.to_json(
                body_cls(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False
            )

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        send_kwargs = dict(
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )
        if body is not None:
            send_kwargs['data'] = body
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            **send_kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response
        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _aggregated_list(self,
            request: compute.AggregatedListRegionCommitmentsRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.CommitmentAggregatedList:
        r"""Call the aggregated list method over HTTP.

        Args:
            request (~.compute.AggregatedListRegionCommitmentsRequest):
                The request object. A request message for
                RegionCommitments.AggregatedList. See the method description
                for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE: accepted for interface
                compatibility; the REST transport does not currently use it.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.CommitmentAggregatedList:
        """
        return self._call_api(
            compute.AggregatedListRegionCommitmentsRequest,
            request,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/aggregated/commitments',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("project", "project"),
            ],
            response_cls=compute.CommitmentAggregatedList,
            timeout=timeout,
            metadata=metadata,
        )

    def _get(self,
            request: compute.GetRegionCommitmentRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Commitment:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetRegionCommitmentRequest):
                The request object. A request message for
                RegionCommitments.Get. See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE: accepted for interface
                compatibility; the REST transport does not currently use it.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Commitment:
                Represents a regional Commitment resource. Creating a
                commitment resource means that you are purchasing a committed
                use contract with an explicit start and end time. You can
                create commitments based on vCPUs and memory usage and
                receive discounted rates. For full details, read Signing Up
                for Committed Use Discounts.
        """
        return self._call_api(
            compute.GetRegionCommitmentRequest,
            request,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/commitments/{commitment}',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("commitment", "commitment"),
                ("project", "project"),
                ("region", "region"),
            ],
            response_cls=compute.Commitment,
            timeout=timeout,
            metadata=metadata,
        )

    def _insert(self,
            request: compute.InsertRegionCommitmentRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertRegionCommitmentRequest):
                The request object. A request message for
                RegionCommitments.Insert. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE: accepted for interface
                compatibility; the REST transport does not currently use it.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine has
                three Operation resources: global, regional and zonal. You
                can use an operation resource to manage asynchronous API
                requests. For more information, read Handling API responses.
                Operations can be global, regional or zonal. - For global
                operations, use the ``globalOperations`` resource. - For
                regional operations, use the ``regionOperations`` resource. -
                For zonal operations, use the ``zonalOperations`` resource.
                For more information, read Global, Regional, and Zonal
                Resources.
        """
        return self._call_api(
            compute.InsertRegionCommitmentRequest,
            request,
            http_options=[
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/commitments',
                    'body': 'commitment_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("project", "project"),
                ("region", "region"),
            ],
            response_cls=compute.Operation,
            body_cls=compute.Commitment,
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListRegionCommitmentsRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.CommitmentList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListRegionCommitmentsRequest):
                The request object. A request message for
                RegionCommitments.List. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE: accepted for interface
                compatibility; the REST transport does not currently use it.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.CommitmentList:
                Contains a list of Commitment resources.
        """
        return self._call_api(
            compute.ListRegionCommitmentsRequest,
            request,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/commitments',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("project", "project"),
                ("region", "region"),
            ],
            response_cls=compute.CommitmentList,
            timeout=timeout,
            metadata=metadata,
        )

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListRegionCommitmentsRequest],
            compute.CommitmentAggregatedList]:
        return self._aggregated_list

    @property
    def get(self) -> Callable[
            [compute.GetRegionCommitmentRequest],
            compute.Commitment]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionCommitmentRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListRegionCommitmentsRequest],
            compute.CommitmentList]:
        return self._list

    def close(self):
        self._session.close()


__all__ = (
    'RegionCommitmentsRestTransport',
)
Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionDiskTypesClient + +__all__ = ( + 'RegionDiskTypesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/client.py new file mode 100644 index 000000000..24c38f586 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/client.py @@ -0,0 +1,536 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_disk_types import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionDiskTypesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionDiskTypesRestTransport + + +class RegionDiskTypesClientMeta(type): + """Metaclass for the RegionDiskTypes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionDiskTypesTransport]] + _transport_registry["rest"] = RegionDiskTypesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionDiskTypesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class RegionDiskTypesClient(metaclass=RegionDiskTypesClientMeta): + """The RegionDiskTypes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionDiskTypesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionDiskTypesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionDiskTypesTransport: + """Returns the transport used by the client instance. + + Returns: + RegionDiskTypesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified
organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionDiskTypesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region disk types client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionDiskTypesTransport]): The + transport to use. If set to None, a transport is chosen + automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionDiskTypesTransport): + # transport is a RegionDiskTypesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def get(self, + request: Union[compute.GetRegionDiskTypeRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk_type: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.DiskType: + r"""Returns the specified regional disk type. Gets a list + of available disk types by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionDiskTypeRequest, dict]): + The request object. A request message for + RegionDiskTypes.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk_type (str): + Name of the disk type to return. + This corresponds to the ``disk_type`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.DiskType: + Represents a Disk Type resource. 
Google Compute Engine + has two Disk Type resources: \* + [Regional](/compute/docs/reference/rest/v1/regionDiskTypes) + \* [Zonal](/compute/docs/reference/rest/v1/diskTypes) + You can choose from a variety of disk types based on + your needs. For more information, read Storage options. + The diskTypes resource represents disk types for a zonal + persistent disk. For more information, read Zonal + persistent disks. The regionDiskTypes resource + represents disk types for a regional persistent disk. + For more information, read Regional persistent disks. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, disk_type]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionDiskTypeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionDiskTypeRequest): + request = compute.GetRegionDiskTypeRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if disk_type is not None: + request.disk_type = disk_type + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list(self, + request: Union[compute.ListRegionDiskTypesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of regional disk types available to + the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionDiskTypesRequest, dict]): + The request object. A request message for + RegionDiskTypes.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_disk_types.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionDiskTypesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionDiskTypesRequest): + request = compute.ListRegionDiskTypesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionDiskTypesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/pagers.py new file mode 100644 index 000000000..81f775fc3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionDiskTypeList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.RegionDiskTypeList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionDiskTypeList], + request: compute.ListRegionDiskTypesRequest, + response: compute.RegionDiskTypeList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionDiskTypesRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionDiskTypeList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionDiskTypesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionDiskTypeList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.DiskType]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/__init__.py new file mode 100644 index 000000000..0ba337c95 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionDiskTypesTransport +from .rest import RegionDiskTypesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionDiskTypesTransport]] +_transport_registry['rest'] = RegionDiskTypesRestTransport + +__all__ = ( + 'RegionDiskTypesTransport', + 'RegionDiskTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/base.py new file mode 100644 index 000000000..2ffd722d6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/base.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionDiskTypesTransport(abc.ABC): + """Abstract transport class for RegionDiskTypes.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionDiskTypeRequest], + Union[ + compute.DiskType, + Awaitable[compute.DiskType] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionDiskTypesRequest], + Union[ + compute.RegionDiskTypeList, + Awaitable[compute.RegionDiskTypeList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionDiskTypesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/rest.py new file mode 100644 index 000000000..48a75c6e9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disk_types/transports/rest.py @@ -0,0 +1,329 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, 
gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionDiskTypesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionDiskTypesRestTransport(RegionDiskTypesTransport): + """REST backend transport for RegionDiskTypes. + + The RegionDiskTypes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _get(self, + request: compute.GetRegionDiskTypeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DiskType: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionDiskTypeRequest): + The request object. A request message for + RegionDiskTypes.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DiskType: + Represents a Disk Type resource. Google Compute Engine + has two Disk Type resources: \* + `Regional <https://cloud.google.com/compute/docs/reference/rest/v1/regionDiskTypes>`__ + \* `Zonal <https://cloud.google.com/compute/docs/reference/rest/v1/diskTypes>`__ + You can choose from a variety of disk types based on + your needs. For more information, read Storage options. + The diskTypes resource represents disk types for a zonal + persistent disk. For more information, read Zonal + persistent disks. The regionDiskTypes resource + represents disk types for a regional persistent disk. + For more information, read Regional persistent disks.
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/diskTypes/{disk_type}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk_type", + "diskType" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionDiskTypeRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionDiskTypeRequest.to_json( + compute.GetRegionDiskTypeRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DiskType.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionDiskTypesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RegionDiskTypeList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionDiskTypesRequest): + The request object. A request message for + RegionDiskTypes.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RegionDiskTypeList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/diskTypes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionDiskTypesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionDiskTypesRequest.to_json( + compute.ListRegionDiskTypesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RegionDiskTypeList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def get(self) -> Callable[ + [compute.GetRegionDiskTypeRequest], + compute.DiskType]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListRegionDiskTypesRequest], + compute.RegionDiskTypeList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'RegionDiskTypesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/__init__.py new file mode 100644 index 000000000..789567f0a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionDisksClient + +__all__ = ( + 'RegionDisksClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/client.py new file mode 100644 index 000000000..6594fd9f8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/client.py @@ -0,0 +1,1605 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_disks import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionDisksTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionDisksRestTransport + + +class RegionDisksClientMeta(type): + """Metaclass for the RegionDisks client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionDisksTransport]] + _transport_registry["rest"] = RegionDisksRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionDisksTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionDisksClient(metaclass=RegionDisksClientMeta):
+    """The RegionDisks API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionDisksClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionDisksClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> RegionDisksTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            RegionDisksTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization
string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, RegionDisksTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the region disks client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, RegionDisksTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionDisksTransport): + # transport is a RegionDisksTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_resource_policies(self, + request: Union[compute.AddResourcePoliciesRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk: str = None, + region_disks_add_resource_policies_request_resource: compute.RegionDisksAddResourcePoliciesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds existing resource policies to a regional disk. + You can only add one policy which will be applied to + this disk for scheduling snapshot creation. + + Args: + request (Union[google.cloud.compute_v1.types.AddResourcePoliciesRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.AddResourcePolicies. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + The disk name for this request. + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region_disks_add_resource_policies_request_resource (google.cloud.compute_v1.types.RegionDisksAddResourcePoliciesRequest): + The body resource for this request + This corresponds to the ``region_disks_add_resource_policies_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, disk, region_disks_add_resource_policies_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddResourcePoliciesRegionDiskRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddResourcePoliciesRegionDiskRequest): + request = compute.AddResourcePoliciesRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if disk is not None: + request.disk = disk + if region_disks_add_resource_policies_request_resource is not None: + request.region_disks_add_resource_policies_request_resource = region_disks_add_resource_policies_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_resource_policies] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_snapshot(self, + request: Union[compute.CreateSnapshotRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk: str = None, + snapshot_resource: compute.Snapshot = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a snapshot of this regional disk. + + Args: + request (Union[google.cloud.compute_v1.types.CreateSnapshotRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.CreateSnapshot. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + Name of the regional persistent disk + to snapshot. + + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + snapshot_resource (google.cloud.compute_v1.types.Snapshot): + The body resource for this request + This corresponds to the ``snapshot_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, disk, snapshot_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.CreateSnapshotRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.CreateSnapshotRegionDiskRequest): + request = compute.CreateSnapshotRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if disk is not None: + request.disk = disk + if snapshot_resource is not None: + request.snapshot_resource = snapshot_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_snapshot] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified regional persistent disk. + Deleting a regional disk removes all the replicas of its + data permanently and is irreversible. However, deleting + a disk does not delete any snapshots previously made + from the disk. You must separately delete snapshots. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.Delete. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + Name of the regional persistent disk + to delete. + + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, disk]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionDiskRequest): + request = compute.DeleteRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if disk is not None: + request.disk = disk + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Disk: + r"""Returns a specified regional persistent disk. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.Get. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + Name of the regional persistent disk + to return. + + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Disk: + Represents a Persistent Disk resource. Google Compute + Engine has two Disk resources: \* + [Zonal](/compute/docs/reference/rest/v1/disks) \* + [Regional](/compute/docs/reference/rest/v1/regionDisks) + Persistent disks are required for running your VM + instances. Create both boot and non-boot (data) + persistent disks. For more information, read Persistent + Disks. For more storage options, read Storage options. + The disks resource represents a zonal persistent disk. + For more information, read Zonal persistent disks. The + regionDisks resource represents a regional persistent + disk. For more information, read Regional resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, disk]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetRegionDiskRequest): + request = compute.GetRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if disk is not None: + request.disk = disk + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.GetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicyRegionDiskRequest): + request = compute.GetIamPolicyRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk_resource: compute.Disk = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a persistent regional disk in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk_resource (google.cloud.compute_v1.types.Disk): + The body resource for this request + This corresponds to the ``disk_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, disk_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionDiskRequest): + request = compute.InsertRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if disk_resource is not None: + request.disk_resource = disk_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionDisksRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of persistent disks contained + within the specified region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionDisksRequest, dict]): + The request object. A request message for + RegionDisks.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_disks.pagers.ListPager: + A list of Disk resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionDisksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionDisksRequest): + request = compute.ListRegionDisksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def remove_resource_policies(self, + request: Union[compute.RemoveResourcePoliciesRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk: str = None, + region_disks_remove_resource_policies_request_resource: compute.RegionDisksRemoveResourcePoliciesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Removes resource policies from a regional disk. 
+ + Args: + request (Union[google.cloud.compute_v1.types.RemoveResourcePoliciesRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.RemoveResourcePolicies. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + The disk name for this request. + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_disks_remove_resource_policies_request_resource (google.cloud.compute_v1.types.RegionDisksRemoveResourcePoliciesRequest): + The body resource for this request + This corresponds to the ``region_disks_remove_resource_policies_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, disk, region_disks_remove_resource_policies_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemoveResourcePoliciesRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemoveResourcePoliciesRegionDiskRequest): + request = compute.RemoveResourcePoliciesRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if disk is not None: + request.disk = disk + if region_disks_remove_resource_policies_request_resource is not None: + request.region_disks_remove_resource_policies_request_resource = region_disks_remove_resource_policies_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.remove_resource_policies] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def resize(self, + request: Union[compute.ResizeRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + disk: str = None, + region_disks_resize_request_resource: compute.RegionDisksResizeRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Resizes the specified regional persistent disk. + + Args: + request (Union[google.cloud.compute_v1.types.ResizeRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.Resize. See the method description for + details. + project (str): + The project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + disk (str): + Name of the regional persistent disk. + This corresponds to the ``disk`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_disks_resize_request_resource (google.cloud.compute_v1.types.RegionDisksResizeRequest): + The body resource for this request + This corresponds to the ``region_disks_resize_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, disk, region_disks_resize_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ResizeRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ResizeRegionDiskRequest): + request = compute.ResizeRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if disk is not None: + request.disk = disk + if region_disks_resize_request_resource is not None: + request.region_disks_resize_request_resource = region_disks_resize_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.resize] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_policy_request_resource: compute.RegionSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.SetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + This corresponds to the ``region_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, region_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetIamPolicyRegionDiskRequest): + request = compute.SetIamPolicyRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_policy_request_resource is not None: + request.region_set_policy_request_resource = region_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_labels_request_resource: compute.RegionSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on the target regional disk. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.SetLabels. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. 
+ + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_set_labels_request_resource (google.cloud.compute_v1.types.RegionSetLabelsRequest): + The body resource for this request + This corresponds to the ``region_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, region_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsRegionDiskRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetLabelsRegionDiskRequest): + request = compute.SetLabelsRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_labels_request_resource is not None: + request.region_set_labels_request_resource = region_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsRegionDiskRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsRegionDiskRequest, dict]): + The request object. A request message for + RegionDisks.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsRegionDiskRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsRegionDiskRequest): + request = compute.TestIamPermissionsRegionDiskRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionDisksClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/pagers.py new file mode 100644 index 000000000..0ae5cd324 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class ListPager:
    """A pager for iterating through ``list`` requests.

    This class thinly wraps an initial
    :class:`google.cloud.compute_v1.types.DiskList` object, and
    provides an ``__iter__`` method to iterate through its
    ``items`` field.

    If there are more pages, the ``__iter__`` method will make additional
    ``List`` requests and continue to iterate
    through the ``items`` field on the
    corresponding responses.

    All the usual :class:`google.cloud.compute_v1.types.DiskList`
    attributes are available on the pager. If multiple requests are made, only
    the most recent response is retained, and thus used for attribute lookup.
    """
    def __init__(self,
            method: Callable[..., compute.DiskList],
            request: compute.ListRegionDisksRequest,
            response: compute.DiskList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListRegionDisksRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.DiskList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token updates never mutate the caller's object.
        self._request = compute.ListRegionDisksRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attribute lookups to the most recent response,
        # so the pager exposes the same surface as a DiskList.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.DiskList]:
        # Yield the initial page, then keep fetching while the service
        # reports another page via next_page_token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[compute.Disk]:
        # Flatten every page's ``items`` into a single stream of disks.
        return (item for page in self.pages for item in page.items)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'

# ==== new file: owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/transports/__init__.py ====
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import RegionDisksTransport
from .rest import RegionDisksRestTransport


# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[RegionDisksTransport]] +_transport_registry['rest'] = RegionDisksRestTransport + +__all__ = ( + 'RegionDisksTransport', + 'RegionDisksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/transports/base.py new file mode 100644 index 000000000..ae74cd02c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/transports/base.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Resolve the installed package version for the user-agent header; fall back
# to a bare ClientInfo when the distribution is not pip-installed (e.g. when
# running generated code straight from source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class RegionDisksTransport(abc.ABC):
    """Abstract transport class for RegionDisks.

    Concrete subclasses (e.g. the REST transport) implement the RPC
    properties below; this base class only resolves credentials and
    precomputes retry/timeout-wrapped method callables.
    """

    # OAuth scopes requested when none are supplied by the caller.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            # Fall back to Application Default Credentials.
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr() guard keeps compatibility with older google-auth versions.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC callable is wrapped with
        # retry/timeout handling and the user-agent metadata from client_info.
        # Concrete transports dispatch through self._wrapped_methods.
        self._wrapped_methods = {
            self.add_resource_policies: gapic_v1.method.wrap_method(
                self.add_resource_policies,
                default_timeout=None,
                client_info=client_info,
            ),
            self.create_snapshot: gapic_v1.method.wrap_method(
                self.create_snapshot,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_iam_policy: gapic_v1.method.wrap_method(
                self.get_iam_policy,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.remove_resource_policies: gapic_v1.method.wrap_method(
                self.remove_resource_policies,
                default_timeout=None,
                client_info=client_info,
            ),
            self.resize: gapic_v1.method.wrap_method(
                self.resize,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_iam_policy: gapic_v1.method.wrap_method(
                self.set_iam_policy,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_labels: gapic_v1.method.wrap_method(
                self.set_labels,
                default_timeout=None,
                client_info=client_info,
            ),
            self.test_iam_permissions: gapic_v1.method.wrap_method(
                self.test_iam_permissions,
                default_timeout=None,
                client_info=client_info,
            ),
         }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # Each property below declares one RPC of the RegionDisks service.
    # Subclasses return a callable taking the request type and returning
    # (or awaiting) the response type; the base class never implements them.

    @property
    def add_resource_policies(self) -> Callable[
            [compute.AddResourcePoliciesRegionDiskRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def create_snapshot(self) -> Callable[
            [compute.CreateSnapshotRegionDiskRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionDiskRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionDiskRequest],
            Union[
                compute.Disk,
                Awaitable[compute.Disk]
            ]]:
        raise NotImplementedError()

    @property
    def get_iam_policy(self) -> Callable[
            [compute.GetIamPolicyRegionDiskRequest],
            Union[
                compute.Policy,
                Awaitable[compute.Policy]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionDiskRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionDisksRequest],
            Union[
                compute.DiskList,
                Awaitable[compute.DiskList]
            ]]:
        raise NotImplementedError()

    @property
    def remove_resource_policies(self) -> Callable[
            [compute.RemoveResourcePoliciesRegionDiskRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def resize(self) -> Callable[
            [compute.ResizeRegionDiskRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def set_iam_policy(self) -> Callable[
            [compute.SetIamPolicyRegionDiskRequest],
            Union[
                compute.Policy,
                Awaitable[compute.Policy]
            ]]:
        raise NotImplementedError()

    @property
    def set_labels(self) -> Callable[
            [compute.SetLabelsRegionDiskRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(self) -> Callable[
            [compute.TestIamPermissionsRegionDiskRequest],
            Union[
                compute.TestPermissionsResponse,
                Awaitable[compute.TestPermissionsResponse]
            ]]:
        raise NotImplementedError()


__all__ = (
    'RegionDisksTransport',
)

# ==== new file: owl-bot-staging/v1/google/cloud/compute_v1/services/region_disks/transports/rest.py ====
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

# Older google-api-core releases do not expose _MethodDefault; fall back to a
# plain object sentinel in the retry union.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionDisksTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionDisksRestTransport(RegionDisksTransport): + """REST backend transport for RegionDisks. + + The RegionDisks API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_resource_policies(self, + request: compute.AddResourcePoliciesRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add resource policies method over HTTP. 
+ + Args: + request (~.compute.AddResourcePoliciesRegionDiskRequest): + The request object. A request message for + RegionDisks.AddResourcePolicies. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies', + 'body': 'region_disks_add_resource_policies_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk", + "disk" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.AddResourcePoliciesRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionDisksAddResourcePoliciesRequest.to_json( + compute.RegionDisksAddResourcePoliciesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddResourcePoliciesRegionDiskRequest.to_json( + compute.AddResourcePoliciesRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _create_snapshot(self, + request: compute.CreateSnapshotRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the create snapshot method over HTTP. + + Args: + request (~.compute.CreateSnapshotRegionDiskRequest): + The request object. A request message for + RegionDisks.CreateSnapshot. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{disk}/createSnapshot', + 'body': 'snapshot_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk", + "disk" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.CreateSnapshotRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Snapshot.to_json( + compute.Snapshot( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.CreateSnapshotRegionDiskRequest.to_json( + compute.CreateSnapshotRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionDiskRequest): + The request object. A request message for + RegionDisks.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{disk}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk", + "disk" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionDiskRequest.to_json( + compute.DeleteRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Disk: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionDiskRequest): + The request object. A request message for + RegionDisks.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Disk: + Represents a Persistent Disk resource. Google Compute + Engine has two Disk resources: \* + `Zonal `__ \* + `Regional `__ + Persistent disks are required for running your VM + instances. Create both boot and non-boot (data) + persistent disks. For more information, read Persistent + Disks. For more storage options, read Storage options. + The disks resource represents a zonal persistent disk. + For more information, read Zonal persistent disks. The + regionDisks resource represents a regional persistent + disk. For more information, read Regional resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{disk}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk", + "disk" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionDiskRequest.to_json( + compute.GetRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Disk.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyRegionDiskRequest): + The request object. A request message for + RegionDisks.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyRegionDiskRequest.to_json( + compute.GetIamPolicyRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionDiskRequest): + The request object. A request message for + RegionDisks.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks', + 'body': 'disk_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Disk.to_json( + compute.Disk( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionDiskRequest.to_json( + compute.InsertRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionDisksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DiskList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionDisksRequest): + The request object. A request message for + RegionDisks.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DiskList: + A list of Disk resources. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionDisksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionDisksRequest.to_json( + compute.ListRegionDisksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DiskList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_resource_policies(self, + request: compute.RemoveResourcePoliciesRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove resource policies method over HTTP. + + Args: + request (~.compute.RemoveResourcePoliciesRegionDiskRequest): + The request object. A request message for + RegionDisks.RemoveResourcePolicies. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies', + 'body': 'region_disks_remove_resource_policies_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk", + "disk" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.RemoveResourcePoliciesRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionDisksRemoveResourcePoliciesRequest.to_json( + compute.RegionDisksRemoveResourcePoliciesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveResourcePoliciesRegionDiskRequest.to_json( + compute.RemoveResourcePoliciesRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _resize(self, + request: compute.ResizeRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the resize method over HTTP. + + Args: + request (~.compute.ResizeRegionDiskRequest): + The request object. A request message for + RegionDisks.Resize. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{disk}/resize', + 'body': 'region_disks_resize_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "disk", + "disk" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ResizeRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionDisksResizeRequest.to_json( + compute.RegionDisksResizeRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ResizeRegionDiskRequest.to_json( + compute.ResizeRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyRegionDiskRequest): + The request object. A request message for + RegionDisks.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{resource}/setIamPolicy', + 'body': 'region_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetPolicyRequest.to_json( + compute.RegionSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyRegionDiskRequest.to_json( + compute.SetIamPolicyRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsRegionDiskRequest): + The request object. A request message for + RegionDisks.SetLabels. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{resource}/setLabels', + 'body': 'region_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetLabelsRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetLabelsRequest.to_json( + compute.RegionSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsRegionDiskRequest.to_json( + compute.SetLabelsRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsRegionDiskRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsRegionDiskRequest): + The request object. A request message for + RegionDisks.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/disks/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsRegionDiskRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsRegionDiskRequest.to_json( + 
compute.TestIamPermissionsRegionDiskRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_resource_policies(self) -> Callable[ + [compute.AddResourcePoliciesRegionDiskRequest], + compute.Operation]: + return self._add_resource_policies + @ property + def create_snapshot(self) -> Callable[ + [compute.CreateSnapshotRegionDiskRequest], + compute.Operation]: + return self._create_snapshot + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionDiskRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionDiskRequest], + compute.Disk]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyRegionDiskRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertRegionDiskRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionDisksRequest], + compute.DiskList]: + return self._list + @ property + def remove_resource_policies(self) -> Callable[ + [compute.RemoveResourcePoliciesRegionDiskRequest], + compute.Operation]: + return self._remove_resource_policies + @ property + def resize(self) -> Callable[ + [compute.ResizeRegionDiskRequest], + compute.Operation]: + return self._resize + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyRegionDiskRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsRegionDiskRequest], + compute.Operation]: + return self._set_labels + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsRegionDiskRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 
'RegionDisksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/__init__.py new file mode 100644 index 000000000..0154198a6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionHealthCheckServicesClient + +__all__ = ( + 'RegionHealthCheckServicesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/client.py new file mode 100644 index 000000000..d8c5a4216 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/client.py @@ -0,0 +1,841 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Synchronous client for the RegionHealthCheckServices API (REST transport)."""
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    # Older api-core releases lack _MethodDefault; fall back to a looser alias.
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.services.region_health_check_services import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import RegionHealthCheckServicesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import RegionHealthCheckServicesRestTransport


class RegionHealthCheckServicesClientMeta(type):
    """Metaclass for the RegionHealthCheckServices client.

    Provides class-level transport lookup so support objects can be
    resolved without polluting client instances.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[RegionHealthCheckServicesTransport]]
    _transport_registry["rest"] = RegionHealthCheckServicesRestTransport

    def get_transport_class(cls,
            label: Optional[str] = None,
            ) -> Type[RegionHealthCheckServicesTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: Name of the desired transport. If ``None``, the first
                transport in the registry (the default) is returned.

        Returns:
            The transport class to use.
        """
        if label:
            return cls._transport_registry[label]
        # No explicit request: the first registered transport is the default.
        return next(iter(cls._transport_registry.values()))


class RegionHealthCheckServicesClient(metaclass=RegionHealthCheckServicesClientMeta):
    """The RegionHealthCheckServices API."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert a regular API endpoint to its mTLS counterpart.

        Converts "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com".

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # NOTE: the named groups below were stripped to "(?P" in the corrupted
        # source; restored to valid regex syntax.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            # Already mTLS, or not a googleapis.com host: leave unchanged.
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Create a client from service-account private key info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            RegionHealthCheckServicesClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Create a client from a service-account private key JSON file.

        Args:
            filename (str): The path to the service account private key
                json file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            RegionHealthCheckServicesClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> RegionHealthCheckServicesTransport:
        """The transport used by this client instance."""
        return self._transport

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Return a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Return a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Return a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse an organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Return a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Return a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, RegionHealthCheckServicesTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the region health check services client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, RegionHealthCheckServicesTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions):
                Custom options for the client. Ignored if a ``transport``
                instance is provided. ``api_endpoint`` overrides the default
                endpoint; GOOGLE_API_USE_MTLS_ENDPOINT ("always"/"never"/
                "auto", default "auto") also selects the endpoint; and when
                GOOGLE_API_USE_CLIENT_CERTIFICATE is "true",
                ``client_cert_source`` supplies the mTLS client certificate.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS
                transport creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Validate, then read, the mTLS client-certificate opt-in.
        if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                client_cert_source_func = (
                    mtls.default_client_cert_source() if is_mtls else None
                )

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Save or instantiate the transport.  A caller-supplied transport
        # instance is an extensibility point for unusual situations; it must
        # carry its own credentials/scopes.
        if isinstance(transport, RegionHealthCheckServicesTransport):
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def _coerce_request(self, request, request_cls, flattened):
        """Build the request proto from ``request`` and/or flattened fields.

        Shared by every RPC method: rejects mixing a ``request`` object with
        flattened field arguments, coerces dicts to ``request_cls``, and
        applies any flattened values that are not ``None``.
        """
        # Truthiness (not `is not None`) matches the generated behavior:
        # empty strings do not count as "set" for the mixing check.
        if request is not None and any(flattened.values()):
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Minor optimization: avoid copying when the caller already passed
        # the right proto type (safe — no flattened fields were set).
        if not isinstance(request, request_cls):
            request = request_cls(request)
        for field, value in flattened.items():
            if value is not None:
                setattr(request, field, value)
        return request

    def delete(self,
            request: Optional[Union[compute.DeleteRegionHealthCheckServiceRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            region: Optional[str] = None,
            health_check_service: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Deletes the specified regional HealthCheckService.

        Args:
            request: A ``DeleteRegionHealthCheckServiceRequest`` (or dict).
            project: Project ID for this request. Mutually exclusive with
                ``request``.
            region: Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check_service: Name of the HealthCheckService to delete
                (1-63 chars, RFC1035). Mutually exclusive with ``request``.
            retry: Designation of what errors, if any, should be retried.
            timeout: The timeout for this request.
            metadata: Strings sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation: The resulting
            (regional) Operation resource.
        """
        request = self._coerce_request(
            request,
            compute.DeleteRegionHealthCheckServiceRequest,
            {"project": project, "region": region,
             "health_check_service": health_check_service},
        )

        # The wrapped method adds retry/timeout and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete]
        return rpc(request, retry=retry, timeout=timeout, metadata=metadata)

    def get(self,
            request: Optional[Union[compute.GetRegionHealthCheckServiceRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            region: Optional[str] = None,
            health_check_service: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.HealthCheckService:
        r"""Returns the specified regional HealthCheckService resource.

        Args:
            request: A ``GetRegionHealthCheckServiceRequest`` (or dict).
            project: Project ID for this request. Mutually exclusive with
                ``request``.
            region: Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check_service: Name of the HealthCheckService to fetch
                (1-63 chars, RFC1035). Mutually exclusive with ``request``.
            retry: Designation of what errors, if any, should be retried.
            timeout: The timeout for this request.
            metadata: Strings sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.HealthCheckService: The
            Health-Check-as-a-Service resource.
        """
        request = self._coerce_request(
            request,
            compute.GetRegionHealthCheckServiceRequest,
            {"project": project, "region": region,
             "health_check_service": health_check_service},
        )

        rpc = self._transport._wrapped_methods[self._transport.get]
        return rpc(request, retry=retry, timeout=timeout, metadata=metadata)

    def insert(self,
            request: Optional[Union[compute.InsertRegionHealthCheckServiceRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            region: Optional[str] = None,
            health_check_service_resource: Optional[compute.HealthCheckService] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Creates a regional HealthCheckService resource in the specified
        project and region using the data included in the request.

        Args:
            request: An ``InsertRegionHealthCheckServiceRequest`` (or dict).
            project: Project ID for this request. Mutually exclusive with
                ``request``.
            region: Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check_service_resource: The body resource for this
                request. Mutually exclusive with ``request``.
            retry: Designation of what errors, if any, should be retried.
            timeout: The timeout for this request.
            metadata: Strings sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation: The resulting
            (regional) Operation resource.
        """
        request = self._coerce_request(
            request,
            compute.InsertRegionHealthCheckServiceRequest,
            {"project": project, "region": region,
             "health_check_service_resource": health_check_service_resource},
        )

        rpc = self._transport._wrapped_methods[self._transport.insert]
        return rpc(request, retry=retry, timeout=timeout, metadata=metadata)

    def list(self,
            request: Optional[Union[compute.ListRegionHealthCheckServicesRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            region: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListPager:
        r"""Lists all the HealthCheckService resources that have been
        configured for the specified project in the given region.

        Args:
            request: A ``ListRegionHealthCheckServicesRequest`` (or dict).
            project: Project ID for this request. Mutually exclusive with
                ``request``.
            region: Name of the region scoping this request. Mutually
                exclusive with ``request``.
            retry: Designation of what errors, if any, should be retried.
            timeout: The timeout for this request.
            metadata: Strings sent along with the request as metadata.

        Returns:
            pagers.ListPager: Iterating over this object yields results and
            resolves additional pages automatically.
        """
        request = self._coerce_request(
            request,
            compute.ListRegionHealthCheckServicesRequest,
            {"project": project, "region": region},
        )

        rpc = self._transport._wrapped_methods[self._transport.list]
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata)

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        return pagers.ListPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

    def patch(self,
            request: Optional[Union[compute.PatchRegionHealthCheckServiceRequest, dict]] = None,
            *,
            project: Optional[str] = None,
            region: Optional[str] = None,
            health_check_service: Optional[str] = None,
            health_check_service_resource: Optional[compute.HealthCheckService] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Updates the specified regional HealthCheckService resource with
        the data included in the request. Supports PATCH semantics and uses
        the JSON merge patch format and processing rules.

        Args:
            request: A ``PatchRegionHealthCheckServiceRequest`` (or dict).
            project: Project ID for this request. Mutually exclusive with
                ``request``.
            region: Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check_service: Name of the HealthCheckService to update
                (1-63 chars, RFC1035). Mutually exclusive with ``request``.
            health_check_service_resource: The body resource for this
                request. Mutually exclusive with ``request``.
            retry: Designation of what errors, if any, should be retried.
            timeout: The timeout for this request.
            metadata: Strings sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation: The resulting
            (regional) Operation resource.
        """
        request = self._coerce_request(
            request,
            compute.PatchRegionHealthCheckServiceRequest,
            {"project": project, "region": region,
             "health_check_service": health_check_service,
             "health_check_service_resource": health_check_service_resource},
        )

        rpc = self._transport._wrapped_methods[self._transport.patch]
        return rpc(request, retry=retry, timeout=timeout, metadata=metadata)

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the
            transport and may cause errors in other clients!
        """
        self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-compute",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Running from a source checkout without an installed distribution.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "RegionHealthCheckServicesClient",
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class ListPager:
    """A pager for iterating through ``list`` requests.

    Thinly wraps an initial
    :class:`google.cloud.compute_v1.types.HealthCheckServicesList` and
    exposes ``__iter__`` over its ``items`` field, issuing follow-up
    ``List`` requests transparently while a ``next_page_token`` is present.

    All the usual response attributes are available on the pager; when
    multiple requests are made, attribute lookup reflects only the most
    recent response.
    """
    def __init__(self,
            method: Callable[..., compute.HealthCheckServicesList],
            request: compute.ListRegionHealthCheckServicesRequest,
            response: compute.HealthCheckServicesList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListRegionHealthCheckServicesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.HealthCheckServicesList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations don't leak to the caller.
        self._request = compute.ListRegionHealthCheckServicesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.HealthCheckServicesList]:
        """Yield each page of results, fetching lazily as needed."""
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                return
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)

    def __iter__(self) -> Iterator[compute.HealthCheckService]:
        return (item for page in self.pages for item in page.items)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
+_transport_registry = OrderedDict() # type: Dict[str, Type[RegionHealthCheckServicesTransport]] +_transport_registry['rest'] = RegionHealthCheckServicesRestTransport + +__all__ = ( + 'RegionHealthCheckServicesTransport', + 'RegionHealthCheckServicesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/transports/base.py new file mode 100644 index 000000000..dfdcbf825 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionHealthCheckServicesTransport(abc.ABC): + """Abstract transport class for RegionHealthCheckServices.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionHealthCheckServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionHealthCheckServiceRequest], + Union[ + compute.HealthCheckService, + Awaitable[compute.HealthCheckService] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionHealthCheckServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionHealthCheckServicesRequest], + Union[ + compute.HealthCheckServicesList, + Awaitable[compute.HealthCheckServicesList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchRegionHealthCheckServiceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionHealthCheckServicesTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/transports/rest.py new file mode 100644 index 000000000..5afe400a3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_check_services/transports/rest.py @@ -0,0 +1,673 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionHealthCheckServicesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionHealthCheckServicesRestTransport(RegionHealthCheckServicesTransport): + """REST backend transport for RegionHealthCheckServices. + + The RegionHealthCheckServices API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. 
It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRegionHealthCheckServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionHealthCheckServiceRequest): + The request object. A request message for + RegionHealthCheckServices.Delete. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "health_check_service", + "healthCheckService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteRegionHealthCheckServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionHealthCheckServiceRequest.to_json( + compute.DeleteRegionHealthCheckServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionHealthCheckServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.HealthCheckService: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionHealthCheckServiceRequest): + The request object. A request message for + RegionHealthCheckServices.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.HealthCheckService: + Represents a Health-Check as a + Service resource. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "health_check_service", + "healthCheckService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionHealthCheckServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionHealthCheckServiceRequest.to_json( + compute.GetRegionHealthCheckServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.HealthCheckService.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionHealthCheckServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionHealthCheckServiceRequest): + The request object. A request message for + RegionHealthCheckServices.Insert. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/healthCheckServices', + 'body': 'health_check_service_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionHealthCheckServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.HealthCheckService.to_json( + compute.HealthCheckService( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionHealthCheckServiceRequest.to_json( + compute.InsertRegionHealthCheckServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionHealthCheckServicesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.HealthCheckServicesList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionHealthCheckServicesRequest): + The request object. A request message for + RegionHealthCheckServices.List. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.HealthCheckServicesList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/healthCheckServices', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionHealthCheckServicesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionHealthCheckServicesRequest.to_json( + compute.ListRegionHealthCheckServicesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.HealthCheckServicesList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchRegionHealthCheckServiceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchRegionHealthCheckServiceRequest): + The request object. A request message for + RegionHealthCheckServices.Patch. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}', + 'body': 'health_check_service_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "health_check_service", + "healthCheckService" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchRegionHealthCheckServiceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.HealthCheckService.to_json( + compute.HealthCheckService( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchRegionHealthCheckServiceRequest.to_json( + compute.PatchRegionHealthCheckServiceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionHealthCheckServiceRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionHealthCheckServiceRequest], + compute.HealthCheckService]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRegionHealthCheckServiceRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionHealthCheckServicesRequest], + compute.HealthCheckServicesList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchRegionHealthCheckServiceRequest], + compute.Operation]: + return self._patch + def close(self): + self._session.close() + + +__all__=( + 'RegionHealthCheckServicesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/__init__.py new file mode 100644 index 000000000..841476f7d 
--- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionHealthChecksClient + +__all__ = ( + 'RegionHealthChecksClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/client.py new file mode 100644 index 000000000..771529171 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/client.py @@ -0,0 +1,964 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_health_checks import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionHealthChecksTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionHealthChecksRestTransport + + +class RegionHealthChecksClientMeta(type): + """Metaclass for the RegionHealthChecks client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionHealthChecksTransport]] + _transport_registry["rest"] = RegionHealthChecksRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionHealthChecksTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
class RegionHealthChecksClient(metaclass=RegionHealthChecksClientMeta):
    """The RegionHealthChecks API.

    Synchronous client for Compute Engine regional HealthCheck
    resources (delete / get / insert / list / patch / update).
    """

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com"
        respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # Fix: the named groups had been stripped from this pattern
        # (bare "(?P" with no <name>), which makes re.compile raise
        # re.error at import time. Restored the standard GAPIC names.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            # Already an mTLS endpoint, or not a googleapis.com host.
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            RegionHealthChecksClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            RegionHealthChecksClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> RegionHealthChecksTransport:
        """Returns the transport used by the client instance.

        Returns:
            RegionHealthChecksTransport: The transport used by the client
                instance.
        """
        return self._transport

    # NOTE: the parse_* regexes below also had their named groups stripped
    # ("(?P.+?)"); the group names are restored so the patterns compile and
    # groupdict() returns the expected keys.

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, RegionHealthChecksTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the region health checks client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, RegionHealthChecksTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions):
                Custom options for the client. It won't take effect if a
                ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint. The GOOGLE_API_USE_MTLS_ENDPOINT environment
                variable can also override the endpoint: "always" (always use
                the default mTLS endpoint), "never" (always use the default
                regular endpoint) and "auto" (switch to the mTLS endpoint when
                a client certificate is present; the default). The
                ``api_endpoint`` property takes precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE is "true", the
                ``client_cert_source`` property can provide a client
                certificate for mutual TLS; otherwise the default SSL client
                certificate is used if present.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS
                transport creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # Explicit cert source from options wins.
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()
                else:
                    client_cert_source_func = None

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                if is_mtls:
                    api_endpoint = self.DEFAULT_MTLS_ENDPOINT
                else:
                    api_endpoint = self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, RegionHealthChecksTransport):
            # transport is a RegionHealthChecksTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def delete(self,
            request: Union[compute.DeleteRegionHealthCheckRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            health_check: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Deletes the specified HealthCheck resource.

        Args:
            request (Union[google.cloud.compute_v1.types.DeleteRegionHealthCheckRequest, dict]):
                The request object. A request message for
                RegionHealthChecks.Delete.
            project (str):
                Project ID for this request. Mutually exclusive with
                ``request``.
            region (str):
                Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check (str):
                Name of the HealthCheck resource to delete. Mutually
                exclusive with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                The (zonal/regional/global) Operation resource tracking
                this asynchronous API request.

        Raises:
            ValueError: If both ``request`` and any flattened field are set.
        """
        # Sanity check: a request object and flattened fields are mutually
        # exclusive.
        has_flattened_params = any([project, region, health_check])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Avoid copying if the caller already passed the proto type; there is
        # no risk of mutating caller state because flattened fields are absent.
        if not isinstance(request, compute.DeleteRegionHealthCheckRequest):
            request = compute.DeleteRegionHealthCheckRequest(request)
            if project is not None:
                request.project = project
            if region is not None:
                request.region = region
            if health_check is not None:
                request.health_check = health_check

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.delete]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def get(self,
            request: Union[compute.GetRegionHealthCheckRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            health_check: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.HealthCheck:
        r"""Returns the specified HealthCheck resource. Gets a
        list of available health checks by making a list()
        request.

        Args:
            request (Union[google.cloud.compute_v1.types.GetRegionHealthCheckRequest, dict]):
                The request object. A request message for
                RegionHealthChecks.Get.
            project (str):
                Project ID for this request. Mutually exclusive with
                ``request``.
            region (str):
                Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check (str):
                Name of the HealthCheck resource to return. Mutually
                exclusive with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.HealthCheck:
                The requested regional Health Check resource.

        Raises:
            ValueError: If both ``request`` and any flattened field are set.
        """
        # Sanity check: a request object and flattened fields are mutually
        # exclusive.
        has_flattened_params = any([project, region, health_check])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Avoid copying if the caller already passed the proto type.
        if not isinstance(request, compute.GetRegionHealthCheckRequest):
            request = compute.GetRegionHealthCheckRequest(request)
            if project is not None:
                request.project = project
            if region is not None:
                request.region = region
            if health_check is not None:
                request.health_check = health_check

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def insert(self,
            request: Union[compute.InsertRegionHealthCheckRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            health_check_resource: compute.HealthCheck = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Creates a HealthCheck resource in the specified
        project using the data included in the request.

        Args:
            request (Union[google.cloud.compute_v1.types.InsertRegionHealthCheckRequest, dict]):
                The request object. A request message for
                RegionHealthChecks.Insert.
            project (str):
                Project ID for this request. Mutually exclusive with
                ``request``.
            region (str):
                Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check_resource (google.cloud.compute_v1.types.HealthCheck):
                The body resource for this request. Mutually exclusive
                with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                The Operation resource tracking this asynchronous API
                request.

        Raises:
            ValueError: If both ``request`` and any flattened field are set.
        """
        # Sanity check: a request object and flattened fields are mutually
        # exclusive.
        has_flattened_params = any([project, region, health_check_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Avoid copying if the caller already passed the proto type.
        if not isinstance(request, compute.InsertRegionHealthCheckRequest):
            request = compute.InsertRegionHealthCheckRequest(request)
            if project is not None:
                request.project = project
            if region is not None:
                request.region = region
            if health_check_resource is not None:
                request.health_check_resource = health_check_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.insert]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def list(self,
            request: Union[compute.ListRegionHealthChecksRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListPager:
        r"""Retrieves the list of HealthCheck resources available
        to the specified project.

        Args:
            request (Union[google.cloud.compute_v1.types.ListRegionHealthChecksRequest, dict]):
                The request object. A request message for
                RegionHealthChecks.List.
            project (str):
                Project ID for this request. Mutually exclusive with
                ``request``.
            region (str):
                Name of the region scoping this request. Mutually
                exclusive with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.services.region_health_checks.pagers.ListPager:
                Contains a list of HealthCheck resources. Iterating over
                this object will yield results and resolve additional
                pages automatically.

        Raises:
            ValueError: If both ``request`` and any flattened field are set.
        """
        # Sanity check: a request object and flattened fields are mutually
        # exclusive.
        has_flattened_params = any([project, region])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Avoid copying if the caller already passed the proto type.
        if not isinstance(request, compute.ListRegionHealthChecksRequest):
            request = compute.ListRegionHealthChecksRequest(request)
            if project is not None:
                request.project = project
            if region is not None:
                request.region = region

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.list]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__iter__` convenience method.
        response = pagers.ListPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def patch(self,
            request: Union[compute.PatchRegionHealthCheckRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            health_check: str = None,
            health_check_resource: compute.HealthCheck = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Updates a HealthCheck resource in the specified
        project using the data included in the request. This
        method supports PATCH semantics and uses the JSON merge
        patch format and processing rules.

        Args:
            request (Union[google.cloud.compute_v1.types.PatchRegionHealthCheckRequest, dict]):
                The request object. A request message for
                RegionHealthChecks.Patch.
            project (str):
                Project ID for this request. Mutually exclusive with
                ``request``.
            region (str):
                Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check (str):
                Name of the HealthCheck resource to patch. Mutually
                exclusive with ``request``.
            health_check_resource (google.cloud.compute_v1.types.HealthCheck):
                The body resource for this request. Mutually exclusive
                with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                The Operation resource tracking this asynchronous API
                request.

        Raises:
            ValueError: If both ``request`` and any flattened field are set.
        """
        # Sanity check: a request object and flattened fields are mutually
        # exclusive.
        has_flattened_params = any([project, region, health_check, health_check_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Avoid copying if the caller already passed the proto type.
        if not isinstance(request, compute.PatchRegionHealthCheckRequest):
            request = compute.PatchRegionHealthCheckRequest(request)
            if project is not None:
                request.project = project
            if region is not None:
                request.region = region
            if health_check is not None:
                request.health_check = health_check
            if health_check_resource is not None:
                request.health_check_resource = health_check_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.patch]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def update(self,
            request: Union[compute.UpdateRegionHealthCheckRequest, dict] = None,
            *,
            project: str = None,
            region: str = None,
            health_check: str = None,
            health_check_resource: compute.HealthCheck = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Updates a HealthCheck resource in the specified
        project using the data included in the request.

        Args:
            request (Union[google.cloud.compute_v1.types.UpdateRegionHealthCheckRequest, dict]):
                The request object. A request message for
                RegionHealthChecks.Update.
            project (str):
                Project ID for this request. Mutually exclusive with
                ``request``.
            region (str):
                Name of the region scoping this request. Mutually
                exclusive with ``request``.
            health_check (str):
                Name of the HealthCheck resource to update. Mutually
                exclusive with ``request``.
            health_check_resource (google.cloud.compute_v1.types.HealthCheck):
                The body resource for this request. Mutually exclusive
                with ``request``.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Operation:
                The Operation resource tracking this asynchronous API
                request.

        Raises:
            ValueError: If both ``request`` and any flattened field are set.
        """
        # Sanity check: a request object and flattened fields are mutually
        # exclusive.
        has_flattened_params = any([project, region, health_check, health_check_resource])
        if request is not None and has_flattened_params:
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Avoid copying if the caller already passed the proto type.
        if not isinstance(request, compute.UpdateRegionHealthCheckRequest):
            request = compute.UpdateRegionHealthCheckRequest(request)
            if project is not None:
                request.project = project
            if region is not None:
                request.region = region
            if health_check is not None:
                request.health_check = health_check
            if health_check_resource is not None:
                request.health_check_resource = health_check_resource

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update]

        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.

        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the
            transport and may cause errors in other clients!
        """
        self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-compute",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Running from a source tree without an installed distribution.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "RegionHealthChecksClient",
)
class ListPager:
    """Pager for ``list`` requests returning ``HealthCheckList`` pages.

    Wraps an initial :class:`google.cloud.compute_v1.types.HealthCheckList`
    response and offers ``__iter__`` over the combined ``items`` of every
    page, transparently issuing further ``List`` requests for as long as
    the service returns a ``next_page_token``.

    All :class:`google.cloud.compute_v1.types.HealthCheckList` attributes
    are proxied through the pager; when several requests have been made,
    only the most recent response backs those attribute lookups.
    """
    def __init__(self,
            method: Callable[..., compute.HealthCheckList],
            request: compute.ListRegionHealthChecksRequest,
            response: compute.HealthCheckList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListRegionHealthChecksRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.HealthCheckList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutation never leaks to the caller.
        self._request = compute.ListRegionHealthChecksRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Fall through to the most recent response for unknown attributes.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.HealthCheckList]:
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[compute.HealthCheck]:
        return (item for page in self.pages for item in page.items)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(type(self).__name__, self._response)
+_transport_registry = OrderedDict() # type: Dict[str, Type[RegionHealthChecksTransport]] +_transport_registry['rest'] = RegionHealthChecksRestTransport + +__all__ = ( + 'RegionHealthChecksTransport', + 'RegionHealthChecksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/transports/base.py new file mode 100644 index 000000000..599af3767 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_health_checks/transports/base.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package not installed via pip (e.g. freshly generated code):
    # fall back to a ClientInfo without a gapic version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class RegionHealthChecksTransport(abc.ABC):
    """Abstract transport class for RegionHealthChecks.

    Concrete transports (e.g. the REST transport) subclass this and
    implement each RPC property; this base class resolves credentials
    and pre-wraps every RPC with retry/timeout/client-info handling.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are given.
        """
        # Default to port 443 (HTTPS) when the host carries no explicit port.
        self._host = host if ':' in host else host + ':443'

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Keep the scopes exactly as the caller supplied them.
        self._scopes = scopes

        # Resolve credentials: explicit object > credentials file > ADC.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id)

        # For service-account credentials, prefer self-signed JWTs when the
        # installed google-auth supports them and the caller asked for it.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials,
                            "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Every RPC on this service uses identical wrapping (no retry
        # policy, no default timeout), so build the table in one pass.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (
                self.delete,
                self.get,
                self.insert,
                self.list,
                self.patch,
                self.update,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionHealthCheckRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionHealthCheckRequest],
            Union[compute.HealthCheck, Awaitable[compute.HealthCheck]]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionHealthCheckRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionHealthChecksRequest],
            Union[compute.HealthCheckList, Awaitable[compute.HealthCheckList]]]:
        raise NotImplementedError()

    @property
    def patch(self) -> Callable[
            [compute.PatchRegionHealthCheckRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()

    @property
    def update(self) -> Callable[
            [compute.UpdateRegionHealthCheckRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()


__all__ = (
    'RegionHealthChecksTransport',
)
from google.cloud.compute_v1.types import compute

from .base import RegionHealthChecksTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class RegionHealthChecksRestTransport(RegionHealthChecksTransport):
    """REST backend transport for RegionHealthChecks.

    The RegionHealthChecks API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1.

    All six RPCs share the same request pipeline (URI transcoding,
    query-parameter construction, send, error mapping, response parsing),
    which is factored into the private :meth:`_call_rest` helper.
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument
                is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate to configure mutual TLS HTTP channel. It is
                ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers, "http" can be
                specified.
        """
        # Run the base constructor, which resolves credentials and scopes.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also*
        # be set on the credentials object.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_rest(self,
            request,
            *,
            request_type,
            http_options,
            required_fields,
            response_type,
            body_type=None,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ):
        """Shared HTTP call path for every RPC on this transport.

        Args:
            request: The request proto instance to send.
            request_type: The proto class of ``request`` (used for
                dict/JSON conversion).
            http_options: List of ``{'method', 'uri'[, 'body']}`` dicts for
                :func:`google.api_core.path_template.transcode`.
            required_fields: ``(snake_case, camelCase)`` pairs of required
                query parameters that must survive serialization.
            response_type: The proto class used to parse the response body.
            body_type: Proto class of the request body, or ``None`` for
                body-less methods (GET/DELETE).
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata (sent as headers).

        Returns:
            An instance of ``response_type`` parsed from the response JSON.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: On any HTTP
                status >= 400, mapped to the appropriate subclass.
        """
        request_kwargs = request_type.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the request body, if this method has one.
        send_kwargs = {}
        if body_type is not None:
            send_kwargs['data'] = body_type.to_json(
                body_type(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )

        # Jsonify the query params.
        query_params = json.loads(request_type.to_json(
            request_type(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # to_json above drops fields whose value equals the proto default;
        # restore any *required* query params lost that way so the server
        # always receives them.
        orig_query_params = transcoded_request['query_params']
        for snake_case_name, camel_case_name in required_fields:
            if (snake_case_name in orig_query_params
                    and camel_case_name not in query_params):
                query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # TODO: honor url_scheme instead of hard-coding https.
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            **send_kwargs,
        )

        # Raise the appropriate GoogleAPICallError subclass on error.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return response_type.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _delete(self,
            request: compute.DeleteRegionHealthCheckRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteRegionHealthCheckRequest):
                A request message for RegionHealthChecks.Delete. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. (Currently unused by the REST
                transport; kept for interface compatibility.)
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: Represents an Operation resource, which can
            be used to manage this asynchronous API request.
        """
        return self._call_rest(
            request,
            request_type=compute.DeleteRegionHealthCheckRequest,
            http_options=[{
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}',
            }],
            required_fields=[
                ('health_check', 'healthCheck'),
                ('project', 'project'),
                ('region', 'region'),
            ],
            response_type=compute.Operation,
            timeout=timeout,
            metadata=metadata,
        )

    def _get(self,
            request: compute.GetRegionHealthCheckRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.HealthCheck:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetRegionHealthCheckRequest):
                A request message for RegionHealthChecks.Get. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. (Currently unused by the REST
                transport; kept for interface compatibility.)
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.HealthCheck: Represents a Health Check resource. For
            more information, see Health checks overview.
        """
        return self._call_rest(
            request,
            request_type=compute.GetRegionHealthCheckRequest,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}',
            }],
            required_fields=[
                ('health_check', 'healthCheck'),
                ('project', 'project'),
                ('region', 'region'),
            ],
            response_type=compute.HealthCheck,
            timeout=timeout,
            metadata=metadata,
        )

    def _insert(self,
            request: compute.InsertRegionHealthCheckRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertRegionHealthCheckRequest):
                A request message for RegionHealthChecks.Insert. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. (Currently unused by the REST
                transport; kept for interface compatibility.)
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: Represents an Operation resource, which can
            be used to manage this asynchronous API request.
        """
        return self._call_rest(
            request,
            request_type=compute.InsertRegionHealthCheckRequest,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/regions/{region}/healthChecks',
                'body': 'health_check_resource',
            }],
            required_fields=[
                ('project', 'project'),
                ('region', 'region'),
            ],
            response_type=compute.Operation,
            body_type=compute.HealthCheck,
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListRegionHealthChecksRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.HealthCheckList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListRegionHealthChecksRequest):
                A request message for RegionHealthChecks.List. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. (Currently unused by the REST
                transport; kept for interface compatibility.)
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.HealthCheckList: Contains a list of HealthCheck
            resources.
        """
        return self._call_rest(
            request,
            request_type=compute.ListRegionHealthChecksRequest,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/regions/{region}/healthChecks',
            }],
            required_fields=[
                ('project', 'project'),
                ('region', 'region'),
            ],
            response_type=compute.HealthCheckList,
            timeout=timeout,
            metadata=metadata,
        )

    def _patch(self,
            request: compute.PatchRegionHealthCheckRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the patch method over HTTP.

        Args:
            request (~.compute.PatchRegionHealthCheckRequest):
                A request message for RegionHealthChecks.Patch. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. (Currently unused by the REST
                transport; kept for interface compatibility.)
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: Represents an Operation resource, which can
            be used to manage this asynchronous API request.
        """
        return self._call_rest(
            request,
            request_type=compute.PatchRegionHealthCheckRequest,
            http_options=[{
                'method': 'patch',
                'uri': '/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}',
                'body': 'health_check_resource',
            }],
            required_fields=[
                ('health_check', 'healthCheck'),
                ('project', 'project'),
                ('region', 'region'),
            ],
            response_type=compute.Operation,
            body_type=compute.HealthCheck,
            timeout=timeout,
            metadata=metadata,
        )

    def _update(self,
            request: compute.UpdateRegionHealthCheckRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the update method over HTTP.

        Args:
            request (~.compute.UpdateRegionHealthCheckRequest):
                A request message for RegionHealthChecks.Update. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. (Currently unused by the REST
                transport; kept for interface compatibility.)
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation: Represents an Operation resource, which can
            be used to manage this asynchronous API request.
        """
        return self._call_rest(
            request,
            request_type=compute.UpdateRegionHealthCheckRequest,
            http_options=[{
                'method': 'put',
                'uri': '/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}',
                'body': 'health_check_resource',
            }],
            required_fields=[
                ('health_check', 'healthCheck'),
                ('project', 'project'),
                ('region', 'region'),
            ],
            response_type=compute.Operation,
            body_type=compute.HealthCheck,
            timeout=timeout,
            metadata=metadata,
        )

    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionHealthCheckRequest],
            compute.Operation]:
        return self._delete

    @property
    def get(self) -> Callable[
            [compute.GetRegionHealthCheckRequest],
            compute.HealthCheck]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionHealthCheckRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListRegionHealthChecksRequest],
            compute.HealthCheckList]:
        return self._list

    @property
    def patch(self) -> Callable[
            [compute.PatchRegionHealthCheckRequest],
            compute.Operation]:
        return self._patch

    @property
    def update(self) -> Callable[
            [compute.UpdateRegionHealthCheckRequest],
            compute.Operation]:
        return self._update

    def close(self):
        # Close the underlying authorized HTTP session.
        self._session.close()


__all__ = (
    'RegionHealthChecksRestTransport',
)
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionInstanceGroupManagersClient + +__all__ = ( + 'RegionInstanceGroupManagersClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/client.py new file mode 100644 index 000000000..43fe939fe --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/client.py @@ -0,0 +1,2405 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_instance_group_managers import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionInstanceGroupManagersTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionInstanceGroupManagersRestTransport + + +class RegionInstanceGroupManagersClientMeta(type): + """Metaclass for the RegionInstanceGroupManagers client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionInstanceGroupManagersTransport]] + _transport_registry["rest"] = RegionInstanceGroupManagersRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionInstanceGroupManagersTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionInstanceGroupManagersClient(metaclass=RegionInstanceGroupManagersClientMeta):
+    """The RegionInstanceGroupManagers API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionInstanceGroupManagersClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionInstanceGroupManagersClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionInstanceGroupManagersTransport: + """Returns the transport used by the client instance. + + Returns: + RegionInstanceGroupManagersTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionInstanceGroupManagersTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region instance group managers client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionInstanceGroupManagersTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionInstanceGroupManagersTransport): + # transport is a RegionInstanceGroupManagersTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def abandon_instances(self, + request: Union[compute.AbandonInstancesRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_managers_abandon_instances_request_resource: compute.RegionInstanceGroupManagersAbandonInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Flags the specified instances to be immediately + removed from the managed instance group. Abandoning an + instance does not delete the instance, but it does + remove the instance from any target pools that are + applied by the managed instance group. This method + reduces the targetSize of the managed instance group by + the number of instances that you abandon. 
This operation + is marked as DONE when the action is scheduled even if + the instances have not yet been removed from the group. + You must separately verify the status of the abandoning + action with the listmanagedinstances method. If the + group is part of a backend service that has enabled + connection draining, it can take up to 60 seconds after + the connection draining duration has elapsed before the + VM instance is removed or deleted. You can specify a + maximum of 1000 instances with this method per request. + + Args: + request (Union[google.cloud.compute_v1.types.AbandonInstancesRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.AbandonInstances. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + Name of the managed instance group. + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_managers_abandon_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersAbandonInstancesRequest): + The body resource for this request + This corresponds to the ``region_instance_group_managers_abandon_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, region_instance_group_managers_abandon_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AbandonInstancesRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AbandonInstancesRegionInstanceGroupManagerRequest): + request = compute.AbandonInstancesRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_managers_abandon_instances_request_resource is not None: + request.region_instance_group_managers_abandon_instances_request_resource = region_instance_group_managers_abandon_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.abandon_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def apply_updates_to_instances(self, + request: Union[compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_managers_apply_updates_request_resource: compute.RegionInstanceGroupManagersApplyUpdatesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Apply updates to selected instances the managed + instance group. + + Args: + request (Union[google.cloud.compute_v1.types.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.ApplyUpdatesToInstances. See + the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request, should conform to RFC1035. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group, should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_managers_apply_updates_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersApplyUpdatesRequest): + The body resource for this request + This corresponds to the ``region_instance_group_managers_apply_updates_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, instance_group_manager, region_instance_group_managers_apply_updates_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest): + request = compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_managers_apply_updates_request_resource is not None: + request.region_instance_group_managers_apply_updates_request_resource = region_instance_group_managers_apply_updates_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.apply_updates_to_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def create_instances(self, + request: Union[compute.CreateInstancesRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_managers_create_instances_request_resource: compute.RegionInstanceGroupManagersCreateInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates instances with per-instance configs in this + regional managed instance group. Instances are created + using the current instance template. The create + instances operation is marked DONE if the + createInstances request is successful. The underlying + actions take additional time. You must separately verify + the status of the creating or actions with the + listmanagedinstances method. + + Args: + request (Union[google.cloud.compute_v1.types.CreateInstancesRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.CreateInstances. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region where the + managed instance group is located. It + should conform to RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region_instance_group_managers_create_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersCreateInstancesRequest): + The body resource for this request + This corresponds to the ``region_instance_group_managers_create_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, instance_group_manager, region_instance_group_managers_create_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.CreateInstancesRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.CreateInstancesRegionInstanceGroupManagerRequest): + request = compute.CreateInstancesRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_managers_create_instances_request_resource is not None: + request.region_instance_group_managers_create_instances_request_resource = region_instance_group_managers_create_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified managed instance group and all + of the instances in that group. 
+ + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + Name of the managed instance group to + delete. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionInstanceGroupManagerRequest): + request = compute.DeleteRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_instances(self, + request: Union[compute.DeleteInstancesRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_managers_delete_instances_request_resource: compute.RegionInstanceGroupManagersDeleteInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Flags the specified instances in the managed instance + group to be immediately deleted. 
The instances are also + removed from any target pools of which they were a + member. This method reduces the targetSize of the + managed instance group by the number of instances that + you delete. The deleteInstances operation is marked DONE + if the deleteInstances request is successful. The + underlying actions take additional time. You must + separately verify the status of the deleting action with + the listmanagedinstances method. If the group is part of + a backend service that has enabled connection draining, + it can take up to 60 seconds after the connection + draining duration has elapsed before the VM instance is + removed or deleted. You can specify a maximum of 1000 + instances with this method per request. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteInstancesRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.DeleteInstances. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + Name of the managed instance group. + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_managers_delete_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersDeleteInstancesRequest): + The body resource for this request + This corresponds to the ``region_instance_group_managers_delete_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, region_instance_group_managers_delete_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteInstancesRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteInstancesRegionInstanceGroupManagerRequest): + request = compute.DeleteInstancesRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_managers_delete_instances_request_resource is not None: + request.region_instance_group_managers_delete_instances_request_resource = region_instance_group_managers_delete_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_per_instance_configs(self, + request: Union[compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_manager_delete_instance_config_req_resource: compute.RegionInstanceGroupManagerDeleteInstanceConfigReq = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes selected per-instance configs for the managed + instance group. + + Args: + request (Union[google.cloud.compute_v1.types.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.DeletePerInstanceConfigs. + See the method description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request, should conform to RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_manager_delete_instance_config_req_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagerDeleteInstanceConfigReq): + The body resource for this request + This corresponds to the ``region_instance_group_manager_delete_instance_config_req_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, region_instance_group_manager_delete_instance_config_req_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest): + request = compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_manager_delete_instance_config_req_resource is not None: + request.region_instance_group_manager_delete_instance_config_req_resource = region_instance_group_manager_delete_instance_config_req_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InstanceGroupManager: + r"""Returns all of the details about the specified + managed instance group. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.Get. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + Name of the managed instance group to + return. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.InstanceGroupManager: + Represents a Managed Instance Group + resource. An instance group is a + collection of VM instances that you can + manage as a single entity. For more + information, read Instance groups. For + zonal Managed Instance Group, use the + instanceGroupManagers resource. For + regional Managed Instance Group, use the + regionInstanceGroupManagers resource. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionInstanceGroupManagerRequest): + request = compute.GetRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager_resource: compute.InstanceGroupManager = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a managed instance group using the + information that you specify in the request. After the + group is created, instances in the group are created + using the specified instance template. 
This operation is + marked as DONE when the group is created even if the + instances in the group have not yet been created. You + must separately verify the status of the individual + instances with the listmanagedinstances method. A + regional managed instance group can contain up to 2000 + instances. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + This corresponds to the ``instance_group_manager_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionInstanceGroupManagerRequest): + request = compute.InsertRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager_resource is not None: + request.instance_group_manager_resource = instance_group_manager_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list(self, + request: Union[compute.ListRegionInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of managed instance groups that + are contained within the specified region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionInstanceGroupManagersRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.List. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_instance_group_managers.pagers.ListPager: + Contains a list of managed instance + groups. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionInstanceGroupManagersRequest): + request = compute.ListRegionInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_errors(self, + request: Union[compute.ListErrorsRegionInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListErrorsPager: + r"""Lists all errors thrown by actions on instances for a + given regional managed instance group. The filter and + orderBy query parameters are not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListErrorsRegionInstanceGroupManagersRequest, dict]): + The request object. 
A request message for
+                RegionInstanceGroupManagers.ListErrors. See the method
+                description for details.
+            project (str):
+                Project ID for this request.
+                This corresponds to the ``project`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            region (str):
+                Name of the region scoping this
+                request. This should conform to RFC1035.
+
+                This corresponds to the ``region`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            instance_group_manager (str):
+                The name of the managed instance group. It must be a
+                string that meets the requirements in RFC1035, or an
+                unsigned long integer: must match regexp pattern:
+                ``(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)|[1-9][0-9]{0,19}``.
+
+                This corresponds to the ``instance_group_manager`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.compute_v1.services.region_instance_group_managers.pagers.ListErrorsPager:
+                Iterating over this object will yield
+                results and resolve additional pages
+                automatically.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Sanity check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([project, region, instance_group_manager])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a compute.ListErrorsRegionInstanceGroupManagersRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+ if not isinstance(request, compute.ListErrorsRegionInstanceGroupManagersRequest): + request = compute.ListErrorsRegionInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_errors] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListErrorsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_managed_instances(self, + request: Union[compute.ListManagedInstancesRegionInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListManagedInstancesPager: + r"""Lists the instances in the managed instance group and + instances that are scheduled to be created. The list + includes any current actions that the group has + scheduled for its instances. The orderBy query parameter + is not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListManagedInstancesRegionInstanceGroupManagersRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.ListManagedInstances. See + the method description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_instance_group_managers.pagers.ListManagedInstancesPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListManagedInstancesRegionInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListManagedInstancesRegionInstanceGroupManagersRequest): + request = compute.ListManagedInstancesRegionInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_managed_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListManagedInstancesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_per_instance_configs(self, + request: Union[compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPerInstanceConfigsPager: + r"""Lists all of the per-instance configs defined for the + managed instance group. The orderBy query parameter is + not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListPerInstanceConfigsRegionInstanceGroupManagersRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.ListPerInstanceConfigs. See + the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request, should conform to RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_instance_group_managers.pagers.ListPerInstanceConfigsPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest): + request = compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.list_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPerInstanceConfigsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + instance_group_manager_resource: compute.InstanceGroupManager = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates a managed instance group using the + information that you specify in the request. This + operation is marked as DONE when the group is patched + even if the instances in the group are still in the + process of being patched. You must separately verify the + status of the individual instances with the + listmanagedinstances method. This method supports PATCH + semantics and uses the JSON merge patch format and + processing rules. If you update your group to specify a + new template or instance configuration, it's possible + that your intended specification for each VM in the + group is different from the current state of that VM. To + learn how to apply an updated configuration to the VMs + in a MIG, see Updating instances in a MIG. + + Args: + request (Union[google.cloud.compute_v1.types.PatchRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.Patch. See the method + description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the instance group + manager. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + This corresponds to the ``instance_group_manager_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, instance_group_manager_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchRegionInstanceGroupManagerRequest): + request = compute.PatchRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if instance_group_manager_resource is not None: + request.instance_group_manager_resource = instance_group_manager_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def patch_per_instance_configs(self, + request: Union[compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_manager_patch_instance_config_req_resource: compute.RegionInstanceGroupManagerPatchInstanceConfigReq = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Inserts or patches per-instance configs for the + managed instance group. perInstanceConfig.name serves as + a key used to distinguish whether to perform insert or + patch. + + Args: + request (Union[google.cloud.compute_v1.types.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.PatchPerInstanceConfigs. See + the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request, should conform to RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_manager_patch_instance_config_req_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagerPatchInstanceConfigReq): + The body resource for this request + This corresponds to the ``region_instance_group_manager_patch_instance_config_req_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, region_instance_group_manager_patch_instance_config_req_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest): + request = compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_manager_patch_instance_config_req_resource is not None: + request.region_instance_group_manager_patch_instance_config_req_resource = region_instance_group_manager_patch_instance_config_req_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def recreate_instances(self, + request: Union[compute.RecreateInstancesRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_managers_recreate_request_resource: compute.RegionInstanceGroupManagersRecreateRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Flags the specified VM instances in the managed + instance group to be immediately recreated. Each + instance is recreated using the group's current + configuration. This operation is marked as DONE when the + flag is set even if the instances have not yet been + recreated. You must separately verify the status of each + instance by checking its currentAction field; for more + information, see Checking the status of managed + instances. 
If the group is part of a backend service + that has enabled connection draining, it can take up to + 60 seconds after the connection draining duration has + elapsed before the VM instance is removed or deleted. + You can specify a maximum of 1000 instances with this + method per request. + + Args: + request (Union[google.cloud.compute_v1.types.RecreateInstancesRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.RecreateInstances. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + Name of the managed instance group. + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_managers_recreate_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersRecreateRequest): + The body resource for this request + This corresponds to the ``region_instance_group_managers_recreate_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, region_instance_group_managers_recreate_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RecreateInstancesRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RecreateInstancesRegionInstanceGroupManagerRequest): + request = compute.RecreateInstancesRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_managers_recreate_request_resource is not None: + request.region_instance_group_managers_recreate_request_resource = region_instance_group_managers_recreate_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.recreate_instances] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def resize(self, + request: Union[compute.ResizeRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + size: int = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the intended size of the managed instance + group. If you increase the size, the group creates new + instances using the current instance template. If you + decrease the size, the group deletes one or more + instances. The resize operation is marked DONE if the + resize request is successful. The underlying actions + take additional time. You must separately verify the + status of the creating or deleting actions with the + listmanagedinstances method. If the group is part of a + backend service that has enabled connection draining, it + can take up to 60 seconds after the connection draining + duration has elapsed before the VM instance is removed + or deleted. + + Args: + request (Union[google.cloud.compute_v1.types.ResizeRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.Resize. 
See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + Name of the managed instance group. + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + size (int): + Number of instances that should exist + in this instance group manager. + + This corresponds to the ``size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, size]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ResizeRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ResizeRegionInstanceGroupManagerRequest): + request = compute.ResizeRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if size is not None: + request.size = size + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.resize] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_instance_template(self, + request: Union[compute.SetInstanceTemplateRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_managers_set_template_request_resource: compute.RegionInstanceGroupManagersSetTemplateRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the instance template to use when creating new + instances or recreating instances in this group. + Existing instances are not affected. + + Args: + request (Union[google.cloud.compute_v1.types.SetInstanceTemplateRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.SetInstanceTemplate. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_managers_set_template_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersSetTemplateRequest): + The body resource for this request + This corresponds to the ``region_instance_group_managers_set_template_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, region_instance_group_managers_set_template_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetInstanceTemplateRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetInstanceTemplateRegionInstanceGroupManagerRequest): + request = compute.SetInstanceTemplateRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_managers_set_template_request_resource is not None: + request.region_instance_group_managers_set_template_request_resource = region_instance_group_managers_set_template_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_instance_template] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_target_pools(self, + request: Union[compute.SetTargetPoolsRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_managers_set_target_pools_request_resource: compute.RegionInstanceGroupManagersSetTargetPoolsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Modifies the target pools to which all new instances + in this group are assigned. Existing instances in the + group are not affected. + + Args: + request (Union[google.cloud.compute_v1.types.SetTargetPoolsRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.SetTargetPools. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + Name of the managed instance group. + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_managers_set_target_pools_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersSetTargetPoolsRequest): + The body resource for this request + This corresponds to the ``region_instance_group_managers_set_target_pools_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, instance_group_manager, region_instance_group_managers_set_target_pools_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetTargetPoolsRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetTargetPoolsRegionInstanceGroupManagerRequest): + request = compute.SetTargetPoolsRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_managers_set_target_pools_request_resource is not None: + request.region_instance_group_managers_set_target_pools_request_resource = region_instance_group_managers_set_target_pools_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_target_pools] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def update_per_instance_configs(self, + request: Union[compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group_manager: str = None, + region_instance_group_manager_update_instance_config_req_resource: compute.RegionInstanceGroupManagerUpdateInstanceConfigReq = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Inserts or updates per-instance configs for the + managed instance group. perInstanceConfig.name serves as + a key used to distinguish whether to perform insert or + patch. + + Args: + request (Union[google.cloud.compute_v1.types.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest, dict]): + The request object. A request message for + RegionInstanceGroupManagers.UpdatePerInstanceConfigs. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request, should conform to RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group_manager (str): + The name of the managed instance + group. It should conform to RFC1035. + + This corresponds to the ``instance_group_manager`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_group_manager_update_instance_config_req_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagerUpdateInstanceConfigReq): + The body resource for this request + This corresponds to the ``region_instance_group_manager_update_instance_config_req_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group_manager, region_instance_group_manager_update_instance_config_req_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest): + request = compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group_manager is not None: + request.instance_group_manager = instance_group_manager + if region_instance_group_manager_update_instance_config_req_resource is not None: + request.region_instance_group_manager_update_instance_config_req_resource = region_instance_group_manager_update_instance_config_req_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_per_instance_configs] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionInstanceGroupManagersClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/pagers.py new file mode 100644 index 000000000..014072618 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/pagers.py @@ -0,0 +1,254 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagerList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagerList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionInstanceGroupManagerList], + request: compute.ListRegionInstanceGroupManagersRequest, + response: compute.RegionInstanceGroupManagerList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionInstanceGroupManagerList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionInstanceGroupManagerList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceGroupManager]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListErrorsPager: + """A pager for iterating through ``list_errors`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagersListErrorsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListErrors`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagersListErrorsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionInstanceGroupManagersListErrorsResponse], + request: compute.ListErrorsRegionInstanceGroupManagersRequest, + response: compute.RegionInstanceGroupManagersListErrorsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListErrorsRegionInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionInstanceGroupManagersListErrorsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListErrorsRegionInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionInstanceGroupManagersListErrorsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceManagedByIgmError]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListManagedInstancesPager: + """A pager for iterating through ``list_managed_instances`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagersListInstancesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``managed_instances`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListManagedInstances`` requests and continue to iterate + through the ``managed_instances`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagersListInstancesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionInstanceGroupManagersListInstancesResponse], + request: compute.ListManagedInstancesRegionInstanceGroupManagersRequest, + response: compute.RegionInstanceGroupManagersListInstancesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListManagedInstancesRegionInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionInstanceGroupManagersListInstancesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListManagedInstancesRegionInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionInstanceGroupManagersListInstancesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.ManagedInstance]: + for page in self.pages: + yield from page.managed_instances + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPerInstanceConfigsPager: + """A pager for iterating through ``list_per_instance_configs`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagersListInstanceConfigsResp` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListPerInstanceConfigs`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.RegionInstanceGroupManagersListInstanceConfigsResp` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionInstanceGroupManagersListInstanceConfigsResp], + request: compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest, + response: compute.RegionInstanceGroupManagersListInstanceConfigsResp, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListPerInstanceConfigsRegionInstanceGroupManagersRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionInstanceGroupManagersListInstanceConfigsResp): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionInstanceGroupManagersListInstanceConfigsResp]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.PerInstanceConfig]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/__init__.py new file mode 100644 index 000000000..b93cb98fa --- 
/dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionInstanceGroupManagersTransport +from .rest import RegionInstanceGroupManagersRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionInstanceGroupManagersTransport]] +_transport_registry['rest'] = RegionInstanceGroupManagersRestTransport + +__all__ = ( + 'RegionInstanceGroupManagersTransport', + 'RegionInstanceGroupManagersRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/base.py new file mode 100644 index 000000000..0a40691be --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/base.py @@ -0,0 +1,399 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionInstanceGroupManagersTransport(abc.ABC): + """Abstract transport class for RegionInstanceGroupManagers.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.abandon_instances: gapic_v1.method.wrap_method( + self.abandon_instances, + default_timeout=None, + client_info=client_info, + ), + self.apply_updates_to_instances: gapic_v1.method.wrap_method( + self.apply_updates_to_instances, + default_timeout=None, + client_info=client_info, + ), + self.create_instances: gapic_v1.method.wrap_method( + self.create_instances, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.delete_instances: gapic_v1.method.wrap_method( + self.delete_instances, + default_timeout=None, + client_info=client_info, + ), + self.delete_per_instance_configs: gapic_v1.method.wrap_method( + self.delete_per_instance_configs, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_errors: gapic_v1.method.wrap_method( + self.list_errors, + default_timeout=None, + client_info=client_info, + ), + self.list_managed_instances: gapic_v1.method.wrap_method( + self.list_managed_instances, + default_timeout=None, + client_info=client_info, + ), + self.list_per_instance_configs: gapic_v1.method.wrap_method( + self.list_per_instance_configs, + default_timeout=None, + client_info=client_info, + ), + 
self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.patch_per_instance_configs: gapic_v1.method.wrap_method( + self.patch_per_instance_configs, + default_timeout=None, + client_info=client_info, + ), + self.recreate_instances: gapic_v1.method.wrap_method( + self.recreate_instances, + default_timeout=None, + client_info=client_info, + ), + self.resize: gapic_v1.method.wrap_method( + self.resize, + default_timeout=None, + client_info=client_info, + ), + self.set_instance_template: gapic_v1.method.wrap_method( + self.set_instance_template, + default_timeout=None, + client_info=client_info, + ), + self.set_target_pools: gapic_v1.method.wrap_method( + self.set_target_pools, + default_timeout=None, + client_info=client_info, + ), + self.update_per_instance_configs: gapic_v1.method.wrap_method( + self.update_per_instance_configs, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def abandon_instances(self) -> Callable[ + [compute.AbandonInstancesRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def apply_updates_to_instances(self) -> Callable[ + [compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def create_instances(self) -> Callable[ + [compute.CreateInstancesRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_instances(self) -> Callable[ + [compute.DeleteInstancesRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_per_instance_configs(self) -> Callable[ + [compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionInstanceGroupManagerRequest], + Union[ + compute.InstanceGroupManager, + Awaitable[compute.InstanceGroupManager] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionInstanceGroupManagersRequest], + Union[ + compute.RegionInstanceGroupManagerList, + Awaitable[compute.RegionInstanceGroupManagerList] + ]]: + raise NotImplementedError() + + 
@property + def list_errors(self) -> Callable[ + [compute.ListErrorsRegionInstanceGroupManagersRequest], + Union[ + compute.RegionInstanceGroupManagersListErrorsResponse, + Awaitable[compute.RegionInstanceGroupManagersListErrorsResponse] + ]]: + raise NotImplementedError() + + @property + def list_managed_instances(self) -> Callable[ + [compute.ListManagedInstancesRegionInstanceGroupManagersRequest], + Union[ + compute.RegionInstanceGroupManagersListInstancesResponse, + Awaitable[compute.RegionInstanceGroupManagersListInstancesResponse] + ]]: + raise NotImplementedError() + + @property + def list_per_instance_configs(self) -> Callable[ + [compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest], + Union[ + compute.RegionInstanceGroupManagersListInstanceConfigsResp, + Awaitable[compute.RegionInstanceGroupManagersListInstanceConfigsResp] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def patch_per_instance_configs(self) -> Callable[ + [compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def recreate_instances(self) -> Callable[ + [compute.RecreateInstancesRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def resize(self) -> Callable[ + [compute.ResizeRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_instance_template(self) -> Callable[ + [compute.SetInstanceTemplateRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def 
set_target_pools(self) -> Callable[ + [compute.SetTargetPoolsRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update_per_instance_configs(self) -> Callable[ + [compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionInstanceGroupManagersTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/rest.py new file mode 100644 index 000000000..3f8ea496d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_group_managers/transports/rest.py @@ -0,0 +1,2317 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionInstanceGroupManagersTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionInstanceGroupManagersRestTransport(RegionInstanceGroupManagersTransport): + """REST backend transport for RegionInstanceGroupManagers. + + The RegionInstanceGroupManagers API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _abandon_instances(self, + request: compute.AbandonInstancesRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the abandon instances method over HTTP. 
+ + Args: + request (~.compute.AbandonInstancesRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.AbandonInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/abandonInstances', + 'body': 'region_instance_group_managers_abandon_instances_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.AbandonInstancesRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagersAbandonInstancesRequest.to_json( + compute.RegionInstanceGroupManagersAbandonInstancesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AbandonInstancesRegionInstanceGroupManagerRequest.to_json( + compute.AbandonInstancesRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _apply_updates_to_instances(self, + request: compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the apply updates to + instances method over HTTP. + + Args: + request (~.compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.ApplyUpdatesToInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/applyUpdatesToInstances', + 'body': 'region_instance_group_managers_apply_updates_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagersApplyUpdatesRequest.to_json( + compute.RegionInstanceGroupManagersApplyUpdatesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest.to_json( + compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _create_instances(self, + request: compute.CreateInstancesRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the create instances method over HTTP. + + Args: + request (~.compute.CreateInstancesRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.CreateInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/createInstances', + 'body': 'region_instance_group_managers_create_instances_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.CreateInstancesRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagersCreateInstancesRequest.to_json( + compute.RegionInstanceGroupManagersCreateInstancesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.CreateInstancesRegionInstanceGroupManagerRequest.to_json( + compute.CreateInstancesRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.Delete. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionInstanceGroupManagerRequest.to_json( + compute.DeleteRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete_instances(self, + request: compute.DeleteInstancesRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete instances method over HTTP. + + Args: + request (~.compute.DeleteInstancesRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.DeleteInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/deleteInstances', + 'body': 'region_instance_group_managers_delete_instances_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteInstancesRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagersDeleteInstancesRequest.to_json( + compute.RegionInstanceGroupManagersDeleteInstancesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteInstancesRegionInstanceGroupManagerRequest.to_json( + compute.DeleteInstancesRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete_per_instance_configs(self, + request: compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete per instance + configs method over HTTP. + + Args: + request (~.compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.DeletePerInstanceConfigs. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/deletePerInstanceConfigs', + 'body': 'region_instance_group_manager_delete_instance_config_req_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagerDeleteInstanceConfigReq.to_json( + compute.RegionInstanceGroupManagerDeleteInstanceConfigReq( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest.to_json( + compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.InstanceGroupManager: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.InstanceGroupManager: + Represents a Managed Instance Group + resource. An instance group is a + collection of VM instances that you can + manage as a single entity. For more + information, read Instance groups. 
For + zonal Managed Instance Group, use the + instanceGroupManagers resource. For + regional Managed Instance Group, use the + regionInstanceGroupManagers resource. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionInstanceGroupManagerRequest.to_json( + compute.GetRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.InstanceGroupManager.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.Insert. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers', + 'body': 'instance_group_manager_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupManager.to_json( + compute.InstanceGroupManager( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionInstanceGroupManagerRequest.to_json( + compute.InsertRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RegionInstanceGroupManagerList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionInstanceGroupManagersRequest): + The request object. A request message for + RegionInstanceGroupManagers.List. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RegionInstanceGroupManagerList: + Contains a list of managed instance + groups. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionInstanceGroupManagersRequest.to_json( + compute.ListRegionInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RegionInstanceGroupManagerList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_errors(self, + request: compute.ListErrorsRegionInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RegionInstanceGroupManagersListErrorsResponse: + r"""Call the list errors method over HTTP. + + Args: + request (~.compute.ListErrorsRegionInstanceGroupManagersRequest): + The request object. A request message for + RegionInstanceGroupManagers.ListErrors. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.RegionInstanceGroupManagersListErrorsResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listErrors', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListErrorsRegionInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListErrorsRegionInstanceGroupManagersRequest.to_json( + compute.ListErrorsRegionInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RegionInstanceGroupManagersListErrorsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_managed_instances(self, + request: compute.ListManagedInstancesRegionInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RegionInstanceGroupManagersListInstancesResponse: + r"""Call the list managed instances method over HTTP. + + Args: + request (~.compute.ListManagedInstancesRegionInstanceGroupManagersRequest): + The request object. A request message for + RegionInstanceGroupManagers.ListManagedInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.RegionInstanceGroupManagersListInstancesResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listManagedInstances', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListManagedInstancesRegionInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListManagedInstancesRegionInstanceGroupManagersRequest.to_json( + compute.ListManagedInstancesRegionInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RegionInstanceGroupManagersListInstancesResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_per_instance_configs(self, + request: compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RegionInstanceGroupManagersListInstanceConfigsResp: + r"""Call the list per instance configs method over HTTP. + + Args: + request (~.compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest): + The request object. A request message for + RegionInstanceGroupManagers.ListPerInstanceConfigs. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.RegionInstanceGroupManagersListInstanceConfigsResp: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listPerInstanceConfigs', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest.to_json( + compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RegionInstanceGroupManagersListInstanceConfigsResp.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.Patch. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}', + 'body': 'instance_group_manager_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceGroupManager.to_json( + compute.InstanceGroupManager( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchRegionInstanceGroupManagerRequest.to_json( + compute.PatchRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch_per_instance_configs(self, + request: compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch per instance + configs method over HTTP. + + Args: + request (~.compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.PatchPerInstanceConfigs. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/patchPerInstanceConfigs', + 'body': 'region_instance_group_manager_patch_instance_config_req_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagerPatchInstanceConfigReq.to_json( + compute.RegionInstanceGroupManagerPatchInstanceConfigReq( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest.to_json( + compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _recreate_instances(self, + request: compute.RecreateInstancesRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the recreate instances method over HTTP. + + Args: + request (~.compute.RecreateInstancesRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.RecreateInstances. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/recreateInstances', + 'body': 'region_instance_group_managers_recreate_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.RecreateInstancesRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagersRecreateRequest.to_json( + compute.RegionInstanceGroupManagersRecreateRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RecreateInstancesRegionInstanceGroupManagerRequest.to_json( + compute.RecreateInstancesRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _resize(self, + request: compute.ResizeRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the resize method over HTTP. + + Args: + request (~.compute.ResizeRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.Resize. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/resize', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "size", + "size" + ), + ] + + request_kwargs = compute.ResizeRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ResizeRegionInstanceGroupManagerRequest.to_json( + compute.ResizeRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_instance_template(self, + request: compute.SetInstanceTemplateRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set instance template method over HTTP. + + Args: + request (~.compute.SetInstanceTemplateRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.SetInstanceTemplate. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/setInstanceTemplate', + 'body': 'region_instance_group_managers_set_template_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.SetInstanceTemplateRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagersSetTemplateRequest.to_json( + compute.RegionInstanceGroupManagersSetTemplateRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetInstanceTemplateRegionInstanceGroupManagerRequest.to_json( + compute.SetInstanceTemplateRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_target_pools(self, + request: compute.SetTargetPoolsRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set target pools method over HTTP. + + Args: + request (~.compute.SetTargetPoolsRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.SetTargetPools. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/setTargetPools', + 'body': 'region_instance_group_managers_set_target_pools_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.SetTargetPoolsRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagersSetTargetPoolsRequest.to_json( + compute.RegionInstanceGroupManagersSetTargetPoolsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetTargetPoolsRegionInstanceGroupManagerRequest.to_json( + compute.SetTargetPoolsRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update_per_instance_configs(self, + request: compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update per instance + configs method over HTTP. + + Args: + request (~.compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest): + The request object. A request message for + RegionInstanceGroupManagers.UpdatePerInstanceConfigs. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/updatePerInstanceConfigs', + 'body': 'region_instance_group_manager_update_instance_config_req_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "instance_group_manager", + "instanceGroupManager" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionInstanceGroupManagerUpdateInstanceConfigReq.to_json( + compute.RegionInstanceGroupManagerUpdateInstanceConfigReq( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest.to_json( + compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def abandon_instances(self) -> Callable[ + [compute.AbandonInstancesRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._abandon_instances + @ property + def apply_updates_to_instances(self) -> Callable[ + [compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._apply_updates_to_instances + @ property + def create_instances(self) -> Callable[ + [compute.CreateInstancesRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._create_instances + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._delete + @ property + def delete_instances(self) -> Callable[ + [compute.DeleteInstancesRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._delete_instances + @ property + def delete_per_instance_configs(self) -> Callable[ + 
[compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._delete_per_instance_configs + @ property + def get(self) -> Callable[ + [compute.GetRegionInstanceGroupManagerRequest], + compute.InstanceGroupManager]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionInstanceGroupManagersRequest], + compute.RegionInstanceGroupManagerList]: + return self._list + @ property + def list_errors(self) -> Callable[ + [compute.ListErrorsRegionInstanceGroupManagersRequest], + compute.RegionInstanceGroupManagersListErrorsResponse]: + return self._list_errors + @ property + def list_managed_instances(self) -> Callable[ + [compute.ListManagedInstancesRegionInstanceGroupManagersRequest], + compute.RegionInstanceGroupManagersListInstancesResponse]: + return self._list_managed_instances + @ property + def list_per_instance_configs(self) -> Callable[ + [compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest], + compute.RegionInstanceGroupManagersListInstanceConfigsResp]: + return self._list_per_instance_configs + @ property + def patch(self) -> Callable[ + [compute.PatchRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._patch + @ property + def patch_per_instance_configs(self) -> Callable[ + [compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._patch_per_instance_configs + @ property + def recreate_instances(self) -> Callable[ + [compute.RecreateInstancesRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._recreate_instances + @ property + def resize(self) -> Callable[ + [compute.ResizeRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._resize + @ property + def set_instance_template(self) -> Callable[ + 
[compute.SetInstanceTemplateRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._set_instance_template + @ property + def set_target_pools(self) -> Callable[ + [compute.SetTargetPoolsRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._set_target_pools + @ property + def update_per_instance_configs(self) -> Callable[ + [compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest], + compute.Operation]: + return self._update_per_instance_configs + def close(self): + self._session.close() + + +__all__=( + 'RegionInstanceGroupManagersRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/__init__.py new file mode 100644 index 000000000..aaba60285 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import RegionInstanceGroupsClient + +__all__ = ( + 'RegionInstanceGroupsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/client.py new file mode 100644 index 000000000..da86ffbc2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/client.py @@ -0,0 +1,758 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_instance_groups import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionInstanceGroupsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionInstanceGroupsRestTransport + + +class RegionInstanceGroupsClientMeta(type): + """Metaclass for the RegionInstanceGroups client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionInstanceGroupsTransport]] + _transport_registry["rest"] = RegionInstanceGroupsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionInstanceGroupsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionInstanceGroupsClient(metaclass=RegionInstanceGroupsClientMeta):
+    """The RegionInstanceGroups API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        # NOTE(review): restored the named groups that were stripped from this
+        # pattern — "(?P[^.]+)" is invalid regex syntax ((?P must be followed
+        # by <name>), so re.compile would raise re.error; the group names must
+        # match the m.groups() unpack below.
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionInstanceGroupsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionInstanceGroupsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionInstanceGroupsTransport: + """Returns the transport used by the client instance. + + Returns: + RegionInstanceGroupsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionInstanceGroupsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region instance groups client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionInstanceGroupsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionInstanceGroupsTransport): + # transport is a RegionInstanceGroupsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def get(self, + request: Union[compute.GetRegionInstanceGroupRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.InstanceGroup: + r"""Returns the specified instance group resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionInstanceGroupRequest, dict]): + The request object. A request message for + RegionInstanceGroups.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ instance_group (str): + Name of the instance group resource + to return. + + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.InstanceGroup: + Represents an Instance Group + resource. Instance Groups can be used to + configure a target for load balancing. + Instance groups can either be managed or + unmanaged. To create managed instance + groups, use the instanceGroupManager or + regionInstanceGroupManager resource + instead. Use zonal unmanaged instance + groups if you need to apply load + balancing to groups of heterogeneous + instances or if you need to manage the + instances yourself. You cannot create + regional unmanaged instance groups. For + more information, read Instance groups. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionInstanceGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionInstanceGroupRequest): + request = compute.GetRegionInstanceGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group is not None: + request.instance_group = instance_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionInstanceGroupsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of instance group resources + contained within the specified region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionInstanceGroupsRequest, dict]): + The request object. A request message for + RegionInstanceGroups.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_instance_groups.pagers.ListPager: + Contains a list of InstanceGroup + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionInstanceGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionInstanceGroupsRequest): + request = compute.ListRegionInstanceGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list_instances(self, + request: Union[compute.ListInstancesRegionInstanceGroupsRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group: str = None, + region_instance_groups_list_instances_request_resource: compute.RegionInstanceGroupsListInstancesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListInstancesPager: + r"""Lists the instances in the specified instance group + and displays information about the named ports. + Depending on the specified options, this method can list + all instances or only the instances that are running. + The orderBy query parameter is not supported. + + Args: + request (Union[google.cloud.compute_v1.types.ListInstancesRegionInstanceGroupsRequest, dict]): + The request object. A request message for + RegionInstanceGroups.ListInstances. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group (str): + Name of the regional instance group + for which we want to list the instances. + + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_instance_groups_list_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupsListInstancesRequest): + The body resource for this request + This corresponds to the ``region_instance_groups_list_instances_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_instance_groups.pagers.ListInstancesPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group, region_instance_groups_list_instances_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListInstancesRegionInstanceGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListInstancesRegionInstanceGroupsRequest): + request = compute.ListInstancesRegionInstanceGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group is not None: + request.instance_group = instance_group + if region_instance_groups_list_instances_request_resource is not None: + request.region_instance_groups_list_instances_request_resource = region_instance_groups_list_instances_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_instances] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListInstancesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_named_ports(self, + request: Union[compute.SetNamedPortsRegionInstanceGroupRequest, dict] = None, + *, + project: str = None, + region: str = None, + instance_group: str = None, + region_instance_groups_set_named_ports_request_resource: compute.RegionInstanceGroupsSetNamedPortsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the named ports for the specified regional + instance group. + + Args: + request (Union[google.cloud.compute_v1.types.SetNamedPortsRegionInstanceGroupRequest, dict]): + The request object. A request message for + RegionInstanceGroups.SetNamedPorts. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_group (str): + The name of the regional instance + group where the named ports are updated. + + This corresponds to the ``instance_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region_instance_groups_set_named_ports_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupsSetNamedPortsRequest): + The body resource for this request + This corresponds to the ``region_instance_groups_set_named_ports_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, instance_group, region_instance_groups_set_named_ports_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetNamedPortsRegionInstanceGroupRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetNamedPortsRegionInstanceGroupRequest): + request = compute.SetNamedPortsRegionInstanceGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if instance_group is not None: + request.instance_group = instance_group + if region_instance_groups_set_named_ports_request_resource is not None: + request.region_instance_groups_set_named_ports_request_resource = region_instance_groups_set_named_ports_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_named_ports] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionInstanceGroupsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/pagers.py new file mode 100644 index 000000000..0694e6b52 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/pagers.py @@ -0,0 +1,136 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionInstanceGroupList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.RegionInstanceGroupList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionInstanceGroupList], + request: compute.ListRegionInstanceGroupsRequest, + response: compute.RegionInstanceGroupList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionInstanceGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionInstanceGroupList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionInstanceGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionInstanceGroupList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceGroup]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListInstancesPager: + """A pager for iterating through ``list_instances`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionInstanceGroupsListInstances` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. 
+ + If there are more pages, the ``__iter__`` method will make additional + ``ListInstances`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.RegionInstanceGroupsListInstances` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionInstanceGroupsListInstances], + request: compute.ListInstancesRegionInstanceGroupsRequest, + response: compute.RegionInstanceGroupsListInstances, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListInstancesRegionInstanceGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionInstanceGroupsListInstances): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListInstancesRegionInstanceGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionInstanceGroupsListInstances]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.InstanceWithNamedPorts]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/transports/__init__.py new file mode 100644 index 000000000..ee6fb48ed --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionInstanceGroupsTransport +from .rest import RegionInstanceGroupsRestTransport + + +# Compile a registry of transports. 
# Registry of concrete transport implementations, keyed by label.
# Insertion order matters: the first entry is the default transport.
_transport_registry = OrderedDict(
    [('rest', RegionInstanceGroupsRestTransport)],
)  # type: Dict[str, Type[RegionInstanceGroupsTransport]]

__all__ = (
    'RegionInstanceGroupsTransport',
    'RegionInstanceGroupsRestTransport',
)
class RegionInstanceGroupsTransport(abc.ABC):
    """Abstract transport class for RegionInstanceGroups.

    Concrete subclasses implement the actual wire protocol; this base class
    owns credential resolution and per-method retry/timeout wrapping.
    """

    # OAuth scopes accepted by the service.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    # Hostname used when the caller does not supply one.
    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Authorization credentials to attach to requests.  These
                identify the application to the service; if none are given,
                the client attempts to ascertain credentials from the
                environment.
            credentials_file (Optional[str]): A file with credentials that
                can be loaded with
                :func:`google.auth.load_credentials_from_file`.  Mutually
                exclusive with ``credentials``.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string along with API
                requests.  Generally only set when developing your own
                client library.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs
                should be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Save the hostname; default to port 443 (HTTPS) if none is given.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # Resolve credentials: explicit object, file, or environment default.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id)

        # Service-account credentials always try self-signed JWTs when the
        # installed google-auth version supports it.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials, "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Wrap each RPC once so retry/timeout defaults and user-agent
        # metadata are applied uniformly.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (self.get, self.list, self.list_instances, self.set_named_ports)
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared with other
            clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionInstanceGroupRequest],
            Union[compute.InstanceGroup, Awaitable[compute.InstanceGroup]]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionInstanceGroupsRequest],
            Union[compute.RegionInstanceGroupList, Awaitable[compute.RegionInstanceGroupList]]]:
        raise NotImplementedError()

    @property
    def list_instances(self) -> Callable[
            [compute.ListInstancesRegionInstanceGroupsRequest],
            Union[compute.RegionInstanceGroupsListInstances, Awaitable[compute.RegionInstanceGroupsListInstances]]]:
        raise NotImplementedError()

    @property
    def set_named_ports(self) -> Callable[
            [compute.SetNamedPortsRegionInstanceGroupRequest],
            Union[compute.Operation, Awaitable[compute.Operation]]]:
        raise NotImplementedError()
b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instance_groups/transports/rest.py @@ -0,0 +1,563 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
class RegionInstanceGroupsRestTransport(RegionInstanceGroupsTransport):
    """REST backend transport for RegionInstanceGroups.

    The RegionInstanceGroups API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1.

    The four RPC implementations share identical HTTP plumbing, which is
    factored into :meth:`_call_rest`; each ``_get``/``_list``/... method
    only supplies the per-RPC transcoding configuration.
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Authorization credentials to attach to requests.  If none
                are given, the client attempts to ascertain credentials
                from the environment.
            credentials_file (Optional[str]): A file with credentials that
                can be loaded with
                :func:`google.auth.load_credentials_from_file`.  Ignored if
                ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes.  Ignored if
                ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate to configure a mutual TLS HTTP channel.
                Ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string along with API
                requests.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWTs
                should be used for service account credentials.
            url_scheme: The protocol scheme for the API endpoint.  Normally
                "https", but "http" can be specified for testing or local
                servers.
        """
        # Run the base constructor.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be
        # set on the credentials object.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_rest(self,
                   request,
                   *,
                   request_cls,
                   response_cls,
                   http_options,
                   required_fields,
                   body_cls=None,
                   timeout: float = None,
                   metadata: Sequence[Tuple[str, str]] = (),
                   ):
        """Transcode ``request``, send it over HTTP, and parse the response.

        Args:
            request: The proto-plus request message.
            request_cls: Type of ``request``, used for dict/JSON conversion.
            response_cls: Message type to parse the response body into.
            http_options: ``path_template.transcode`` rules (method, uri and
                optional body field) for this RPC.
            required_fields: ``(snake_case, camelCase)`` pairs of required
                query parameters that must survive JSON serialization.
            body_cls: Message type of the request body field, or ``None``
                for body-less (GET) calls.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            The parsed ``response_cls`` message.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: For any response
                with HTTP status >= 400.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body, if this RPC has one.
        body = None
        if body_cls is not None:
            body = body_cls.to_json(
                body_cls(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # A required field carrying its default value is dropped by the
        # to_json call above; restore it from the transcoded request.
        orig_query_params = transcoded_request['query_params']
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params and camel_case_name not in query_params:
                query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        send_kwargs = {
            'timeout': timeout,
            'headers': headers,
            'params': rest_helpers.flatten_query_params(query_params),
        }
        if body is not None:
            send_kwargs['data'] = body
        # TODO: replace with proper scheme (http/https) configuration logic.
        response = getattr(self._session, method)(
            "https://{host}{uri}".format(host=self._host, uri=uri),
            **send_kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the parsed response.
        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _get(self,
            request: compute.GetRegionInstanceGroupRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.InstanceGroup:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetRegionInstanceGroupRequest):
                The request object. A request message for
                RegionInstanceGroups.Get. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.InstanceGroup:
                Represents an Instance Group resource.  Instance groups can
                be used to configure a target for load balancing, and can be
                either managed or unmanaged.  Regional unmanaged instance
                groups cannot be created.  For more information, read
                Instance groups.
        """
        return self._call_rest(
            request,
            request_cls=compute.GetRegionInstanceGroupRequest,
            response_cls=compute.InstanceGroup,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('instance_group', 'instanceGroup'),
                ('project', 'project'),
                ('region', 'region'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListRegionInstanceGroupsRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.RegionInstanceGroupList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListRegionInstanceGroupsRequest):
                The request object. A request message for
                RegionInstanceGroups.List. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.RegionInstanceGroupList:
                Contains a list of InstanceGroup resources.
        """
        return self._call_rest(
            request,
            request_cls=compute.ListRegionInstanceGroupsRequest,
            response_cls=compute.RegionInstanceGroupList,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroups',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('project', 'project'),
                ('region', 'region'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _list_instances(self,
            request: compute.ListInstancesRegionInstanceGroupsRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.RegionInstanceGroupsListInstances:
        r"""Call the list instances method over HTTP.

        Args:
            request (~.compute.ListInstancesRegionInstanceGroupsRequest):
                The request object. A request message for
                RegionInstanceGroups.ListInstances. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.RegionInstanceGroupsListInstances:
        """
        return self._call_rest(
            request,
            request_cls=compute.ListInstancesRegionInstanceGroupsRequest,
            response_cls=compute.RegionInstanceGroupsListInstances,
            http_options=[
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}/listInstances',
                    'body': 'region_instance_groups_list_instances_request_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('instance_group', 'instanceGroup'),
                ('project', 'project'),
                ('region', 'region'),
            ],
            body_cls=compute.RegionInstanceGroupsListInstancesRequest,
            timeout=timeout,
            metadata=metadata,
        )

    def _set_named_ports(self,
            request: compute.SetNamedPortsRegionInstanceGroupRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the set named ports method over HTTP.

        Args:
            request (~.compute.SetNamedPortsRegionInstanceGroupRequest):
                The request object. A request message for
                RegionInstanceGroups.SetNamedPorts. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine has
                three Operation resources: \*
                `Global </compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous API
                requests. For more information, read Handling API responses.
                Operations can be global, regional or zonal.  For global
                operations, use the ``globalOperations`` resource; for
                regional operations, the ``regionOperations`` resource; for
                zonal operations, the ``zonalOperations`` resource.  For
                more information, read Global, Regional, and Zonal
                Resources.
        """
        return self._call_rest(
            request,
            request_cls=compute.SetNamedPortsRegionInstanceGroupRequest,
            response_cls=compute.Operation,
            http_options=[
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}/setNamedPorts',
                    'body': 'region_instance_groups_set_named_ports_request_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('instance_group', 'instanceGroup'),
                ('project', 'project'),
                ('region', 'region'),
            ],
            body_cls=compute.RegionInstanceGroupsSetNamedPortsRequest,
            timeout=timeout,
            metadata=metadata,
        )

    @property
    def get(self) -> Callable[
            [compute.GetRegionInstanceGroupRequest],
            compute.InstanceGroup]:
        return self._get

    @property
    def list(self) -> Callable[
            [compute.ListRegionInstanceGroupsRequest],
            compute.RegionInstanceGroupList]:
        return self._list

    @property
    def list_instances(self) -> Callable[
            [compute.ListInstancesRegionInstanceGroupsRequest],
            compute.RegionInstanceGroupsListInstances]:
        return self._list_instances

    @property
    def set_named_ports(self) -> Callable[
            [compute.SetNamedPortsRegionInstanceGroupRequest],
            compute.Operation]:
        return self._set_named_ports

    def close(self):
        """Close the underlying HTTP session."""
        self._session.close()
/dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionInstancesClient + +__all__ = ( + 'RegionInstancesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/client.py new file mode 100644 index 000000000..2107c1ae0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/client.py @@ -0,0 +1,452 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.types import compute +from .transports.base import RegionInstancesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionInstancesRestTransport + + +class RegionInstancesClientMeta(type): + """Metaclass for the RegionInstances client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionInstancesTransport]] + _transport_registry["rest"] = RegionInstancesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionInstancesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionInstancesClient(metaclass=RegionInstancesClientMeta):
+    """The RegionInstances API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionInstancesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionInstancesClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> RegionInstancesTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            RegionInstancesTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+
"""Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionInstancesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region instances client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionInstancesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionInstancesTransport): + # transport is a RegionInstancesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def bulk_insert(self, + request: Union[compute.BulkInsertRegionInstanceRequest, dict] = None, + *, + project: str = None, + region: str = None, + bulk_insert_instance_resource_resource: compute.BulkInsertInstanceResource = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates multiple instances in a given region. Count + specifies the number of instances to create. + + Args: + request (Union[google.cloud.compute_v1.types.BulkInsertRegionInstanceRequest, dict]): + The request object. A request message for + RegionInstances.BulkInsert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + bulk_insert_instance_resource_resource (google.cloud.compute_v1.types.BulkInsertInstanceResource): + The body resource for this request + This corresponds to the ``bulk_insert_instance_resource_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, bulk_insert_instance_resource_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.BulkInsertRegionInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.BulkInsertRegionInstanceRequest): + request = compute.BulkInsertRegionInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if bulk_insert_instance_resource_resource is not None: + request.bulk_insert_instance_resource_resource = bulk_insert_instance_resource_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.bulk_insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionInstancesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/__init__.py new file mode 100644 index 000000000..f7ec5fbda --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionInstancesTransport +from .rest import RegionInstancesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionInstancesTransport]] +_transport_registry['rest'] = RegionInstancesRestTransport + +__all__ = ( + 'RegionInstancesTransport', + 'RegionInstancesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/base.py new file mode 100644 index 000000000..a768d3654 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/base.py @@ -0,0 +1,147 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionInstancesTransport(abc.ABC): + """Abstract transport class for RegionInstances.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.bulk_insert: gapic_v1.method.wrap_method( + self.bulk_insert, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def bulk_insert(self) -> Callable[ + [compute.BulkInsertRegionInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionInstancesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/rest.py new file mode 100644 index 000000000..1fb2fa034 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_instances/transports/rest.py @@ -0,0 +1,244 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in 
compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionInstancesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionInstancesRestTransport(RegionInstancesTransport): + """REST backend transport for RegionInstances. + + The RegionInstances API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _bulk_insert(self, + request: compute.BulkInsertRegionInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the bulk insert method over HTTP. 
+
+        Args:
+            request (~.compute.BulkInsertRegionInstanceRequest):
+                The request object. A request message for
+                RegionInstances.BulkInsert. See the
+                method description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/instances/bulkInsert', + 'body': 'bulk_insert_instance_resource_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.BulkInsertRegionInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.BulkInsertInstanceResource.to_json( + compute.BulkInsertInstanceResource( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.BulkInsertRegionInstanceRequest.to_json( + compute.BulkInsertRegionInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def bulk_insert(self) -> Callable[ + [compute.BulkInsertRegionInstanceRequest], + compute.Operation]: + return self._bulk_insert + def close(self): + self._session.close() + + +__all__=( + 'RegionInstancesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/__init__.py new file mode 100644 index 000000000..cc6dcdcea --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import RegionNetworkEndpointGroupsClient + +__all__ = ( + 'RegionNetworkEndpointGroupsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/client.py new file mode 100644 index 000000000..9a1dc7ccc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/client.py @@ -0,0 +1,745 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_network_endpoint_groups import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionNetworkEndpointGroupsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionNetworkEndpointGroupsRestTransport + + +class RegionNetworkEndpointGroupsClientMeta(type): + """Metaclass for the RegionNetworkEndpointGroups client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionNetworkEndpointGroupsTransport]] + _transport_registry["rest"] = RegionNetworkEndpointGroupsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionNetworkEndpointGroupsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionNetworkEndpointGroupsClient(metaclass=RegionNetworkEndpointGroupsClientMeta):
+    """The RegionNetworkEndpointGroups API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionNetworkEndpointGroupsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionNetworkEndpointGroupsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionNetworkEndpointGroupsTransport: + """Returns the transport used by the client instance. + + Returns: + RegionNetworkEndpointGroupsTransport: The transport used by the client + instance. 
+
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, RegionNetworkEndpointGroupsTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the region network endpoint groups client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, RegionNetworkEndpointGroupsTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionNetworkEndpointGroupsTransport): + # transport is a RegionNetworkEndpointGroupsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + region: str = None, + network_endpoint_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified network endpoint group. Note + that the NEG cannot be deleted if it is configured as a + backend of a backend service. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionNetworkEndpointGroupRequest, dict]): + The request object. A request message for + RegionNetworkEndpointGroups.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + The name of the region where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group to delete. It should comply with + RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, network_endpoint_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionNetworkEndpointGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionNetworkEndpointGroupRequest): + request = compute.DeleteRegionNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + region: str = None, + network_endpoint_group: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NetworkEndpointGroup: + r"""Returns the specified network endpoint group. Gets a + list of available network endpoint groups by making a + list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionNetworkEndpointGroupRequest, dict]): + The request object. A request message for + RegionNetworkEndpointGroups.Get. See the method + description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group (str): + The name of the network endpoint + group. It should comply with RFC1035. + + This corresponds to the ``network_endpoint_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.NetworkEndpointGroup: + Represents a collection of network + endpoints. A network endpoint group + (NEG) defines how a set of endpoints + should be reached, whether they are + reachable, and where they are located. + For more information about using NEGs, + see Setting up external HTTP(S) Load + Balancing with internet NEGs, Setting up + zonal NEGs, or Setting up external + HTTP(S) Load Balancing with serverless + NEGs. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, network_endpoint_group]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionNetworkEndpointGroupRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionNetworkEndpointGroupRequest): + request = compute.GetRegionNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if network_endpoint_group is not None: + request.network_endpoint_group = network_endpoint_group + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionNetworkEndpointGroupRequest, dict] = None, + *, + project: str = None, + region: str = None, + network_endpoint_group_resource: compute.NetworkEndpointGroup = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a network endpoint group in the specified + project using the parameters that are included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionNetworkEndpointGroupRequest, dict]): + The request object. A request message for + RegionNetworkEndpointGroups.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region where you want + to create the network endpoint group. It + should comply with RFC1035. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + network_endpoint_group_resource (google.cloud.compute_v1.types.NetworkEndpointGroup): + The body resource for this request + This corresponds to the ``network_endpoint_group_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, network_endpoint_group_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionNetworkEndpointGroupRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionNetworkEndpointGroupRequest): + request = compute.InsertRegionNetworkEndpointGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if network_endpoint_group_resource is not None: + request.network_endpoint_group_resource = network_endpoint_group_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionNetworkEndpointGroupsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of regional network endpoint + groups available to the specified project in the given + region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionNetworkEndpointGroupsRequest, dict]): + The request object. A request message for + RegionNetworkEndpointGroups.List. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region where the + network endpoint group is located. It + should comply with RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_network_endpoint_groups.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionNetworkEndpointGroupsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionNetworkEndpointGroupsRequest): + request = compute.ListRegionNetworkEndpointGroupsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionNetworkEndpointGroupsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/pagers.py new file mode 100644 index 000000000..925a06b3c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NetworkEndpointGroupList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.NetworkEndpointGroupList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NetworkEndpointGroupList], + request: compute.ListRegionNetworkEndpointGroupsRequest, + response: compute.NetworkEndpointGroupList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionNetworkEndpointGroupsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NetworkEndpointGroupList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListRegionNetworkEndpointGroupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NetworkEndpointGroupList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NetworkEndpointGroup]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/__init__.py new file mode 100644 index 000000000..ed84ce6db --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionNetworkEndpointGroupsTransport +from .rest import RegionNetworkEndpointGroupsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionNetworkEndpointGroupsTransport]] +_transport_registry['rest'] = RegionNetworkEndpointGroupsRestTransport + +__all__ = ( + 'RegionNetworkEndpointGroupsTransport', + 'RegionNetworkEndpointGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/base.py new file mode 100644 index 000000000..d5ba9bea5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionNetworkEndpointGroupsTransport(abc.ABC): + """Abstract transport class for RegionNetworkEndpointGroups.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionNetworkEndpointGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionNetworkEndpointGroupRequest], + Union[ + compute.NetworkEndpointGroup, + Awaitable[compute.NetworkEndpointGroup] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionNetworkEndpointGroupRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionNetworkEndpointGroupsRequest], + Union[ + compute.NetworkEndpointGroupList, + Awaitable[compute.NetworkEndpointGroupList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionNetworkEndpointGroupsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/rest.py new file mode 100644 index 000000000..6ce98c213 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_network_endpoint_groups/transports/rest.py @@ -0,0 +1,560 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionNetworkEndpointGroupsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionNetworkEndpointGroupsRestTransport(RegionNetworkEndpointGroupsTransport): + """REST backend transport for RegionNetworkEndpointGroups. + + The RegionNetworkEndpointGroups API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. 
It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRegionNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionNetworkEndpointGroupRequest): + The request object. A request message for + RegionNetworkEndpointGroups.Delete. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups/{network_endpoint_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteRegionNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionNetworkEndpointGroupRequest.to_json( + compute.DeleteRegionNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroup: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionNetworkEndpointGroupRequest): + The request object. A request message for + RegionNetworkEndpointGroups.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkEndpointGroup: + Represents a collection of network + endpoints. A network endpoint group + (NEG) defines how a set of endpoints + should be reached, whether they are + reachable, and where they are located. 
+ For more information about using NEGs, + see Setting up external HTTP(S) Load + Balancing with internet NEGs, Setting up + zonal NEGs, or Setting up external + HTTP(S) Load Balancing with serverless + NEGs. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups/{network_endpoint_group}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "network_endpoint_group", + "networkEndpointGroup" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionNetworkEndpointGroupRequest.to_json( + compute.GetRegionNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroup.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionNetworkEndpointGroupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionNetworkEndpointGroupRequest): + The request object. A request message for + RegionNetworkEndpointGroups.Insert. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups', + 'body': 'network_endpoint_group_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionNetworkEndpointGroupRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NetworkEndpointGroup.to_json( + compute.NetworkEndpointGroup( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionNetworkEndpointGroupRequest.to_json( + compute.InsertRegionNetworkEndpointGroupRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionNetworkEndpointGroupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NetworkEndpointGroupList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionNetworkEndpointGroupsRequest): + The request object. A request message for + RegionNetworkEndpointGroups.List. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NetworkEndpointGroupList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionNetworkEndpointGroupsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionNetworkEndpointGroupsRequest.to_json( + compute.ListRegionNetworkEndpointGroupsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NetworkEndpointGroupList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionNetworkEndpointGroupRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionNetworkEndpointGroupRequest], + compute.NetworkEndpointGroup]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRegionNetworkEndpointGroupRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionNetworkEndpointGroupsRequest], + compute.NetworkEndpointGroupList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'RegionNetworkEndpointGroupsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/__init__.py new file mode 100644 index 000000000..8c5c41b30 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionNotificationEndpointsClient + +__all__ = ( + 'RegionNotificationEndpointsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/client.py new file mode 100644 index 000000000..88d4552cb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/client.py @@ -0,0 +1,733 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_notification_endpoints import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionNotificationEndpointsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionNotificationEndpointsRestTransport + + +class RegionNotificationEndpointsClientMeta(type): + """Metaclass for the RegionNotificationEndpoints client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionNotificationEndpointsTransport]] + _transport_registry["rest"] = RegionNotificationEndpointsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionNotificationEndpointsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class RegionNotificationEndpointsClient(metaclass=RegionNotificationEndpointsClientMeta): + """The RegionNotificationEndpoints API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionNotificationEndpointsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionNotificationEndpointsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionNotificationEndpointsTransport: + """Returns the transport used by the client instance. + + Returns: + RegionNotificationEndpointsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, RegionNotificationEndpointsTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the region notification endpoints client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, RegionNotificationEndpointsTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionNotificationEndpointsTransport): + # transport is a RegionNotificationEndpointsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionNotificationEndpointRequest, dict] = None, + *, + project: str = None, + region: str = None, + notification_endpoint: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified NotificationEndpoint in the + given region + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionNotificationEndpointRequest, dict]): + The request object. A request message for + RegionNotificationEndpoints.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notification_endpoint (str): + Name of the NotificationEndpoint + resource to delete. + + This corresponds to the ``notification_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, notification_endpoint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionNotificationEndpointRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionNotificationEndpointRequest): + request = compute.DeleteRegionNotificationEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if notification_endpoint is not None: + request.notification_endpoint = notification_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionNotificationEndpointRequest, dict] = None, + *, + project: str = None, + region: str = None, + notification_endpoint: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.NotificationEndpoint: + r"""Returns the specified NotificationEndpoint resource + in the given region. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionNotificationEndpointRequest, dict]): + The request object. A request message for + RegionNotificationEndpoints.Get. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notification_endpoint (str): + Name of the NotificationEndpoint + resource to return. 
+ + This corresponds to the ``notification_endpoint`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.NotificationEndpoint: + Represents a notification endpoint. A + notification endpoint resource defines + an endpoint to receive notifications + when there are status changes detected + by the associated health check service. + For more information, see Health checks + overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, notification_endpoint]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionNotificationEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionNotificationEndpointRequest): + request = compute.GetRegionNotificationEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if notification_endpoint is not None: + request.notification_endpoint = notification_endpoint + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionNotificationEndpointRequest, dict] = None, + *, + project: str = None, + region: str = None, + notification_endpoint_resource: compute.NotificationEndpoint = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Create a NotificationEndpoint in the specified + project in the given region using the parameters that + are included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionNotificationEndpointRequest, dict]): + The request object. A request message for + RegionNotificationEndpoints.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + notification_endpoint_resource (google.cloud.compute_v1.types.NotificationEndpoint): + The body resource for this request + This corresponds to the ``notification_endpoint_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, notification_endpoint_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionNotificationEndpointRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionNotificationEndpointRequest): + request = compute.InsertRegionNotificationEndpointRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if notification_endpoint_resource is not None: + request.notification_endpoint_resource = notification_endpoint_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionNotificationEndpointsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Lists the NotificationEndpoints for a project in the + given region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionNotificationEndpointsRequest, dict]): + The request object. A request message for + RegionNotificationEndpoints.List. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_notification_endpoints.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionNotificationEndpointsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionNotificationEndpointsRequest): + request = compute.ListRegionNotificationEndpointsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionNotificationEndpointsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/pagers.py new file mode 100644 index 000000000..bc3fc23c8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.NotificationEndpointList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.NotificationEndpointList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.NotificationEndpointList], + request: compute.ListRegionNotificationEndpointsRequest, + response: compute.NotificationEndpointList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionNotificationEndpointsRequest): + The initial request object. + response (google.cloud.compute_v1.types.NotificationEndpointList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionNotificationEndpointsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.NotificationEndpointList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.NotificationEndpoint]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/__init__.py new file mode 100644 index 000000000..dbb98556c --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionNotificationEndpointsTransport +from .rest import RegionNotificationEndpointsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionNotificationEndpointsTransport]] +_transport_registry['rest'] = RegionNotificationEndpointsRestTransport + +__all__ = ( + 'RegionNotificationEndpointsTransport', + 'RegionNotificationEndpointsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/base.py new file mode 100644 index 000000000..7e9000279 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionNotificationEndpointsTransport(abc.ABC): + """Abstract transport class for RegionNotificationEndpoints.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. 
+ credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. 
+ if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionNotificationEndpointRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionNotificationEndpointRequest], + Union[ + compute.NotificationEndpoint, + Awaitable[compute.NotificationEndpoint] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionNotificationEndpointRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionNotificationEndpointsRequest], + Union[ + compute.NotificationEndpointList, + Awaitable[compute.NotificationEndpointList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionNotificationEndpointsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/rest.py new file mode 100644 index 000000000..1ca3293a4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_notification_endpoints/transports/rest.py @@ -0,0 +1,556 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = 
Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionNotificationEndpointsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionNotificationEndpointsRestTransport(RegionNotificationEndpointsTransport): + """REST backend transport for RegionNotificationEndpoints. + + The RegionNotificationEndpoints API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. 
+ + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. 
+ # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRegionNotificationEndpointRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionNotificationEndpointRequest): + The request object. A request message for + RegionNotificationEndpoints.Delete. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/notificationEndpoints/{notification_endpoint}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "notification_endpoint", + "notificationEndpoint" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.DeleteRegionNotificationEndpointRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionNotificationEndpointRequest.to_json( + compute.DeleteRegionNotificationEndpointRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionNotificationEndpointRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NotificationEndpoint: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionNotificationEndpointRequest): + The request object. A request message for + RegionNotificationEndpoints.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NotificationEndpoint: + Represents a notification endpoint. A + notification endpoint resource defines + an endpoint to receive notifications + when there are status changes detected + by the associated health check service. + For more information, see Health checks + overview. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/notificationEndpoints/{notification_endpoint}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "notification_endpoint", + "notificationEndpoint" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionNotificationEndpointRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionNotificationEndpointRequest.to_json( + compute.GetRegionNotificationEndpointRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NotificationEndpoint.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionNotificationEndpointRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionNotificationEndpointRequest): + The request object. A request message for + RegionNotificationEndpoints.Insert. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/notificationEndpoints', + 'body': 'notification_endpoint_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionNotificationEndpointRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.NotificationEndpoint.to_json( + compute.NotificationEndpoint( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionNotificationEndpointRequest.to_json( + compute.InsertRegionNotificationEndpointRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionNotificationEndpointsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.NotificationEndpointList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionNotificationEndpointsRequest): + The request object. A request message for + RegionNotificationEndpoints.List. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.NotificationEndpointList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/notificationEndpoints', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionNotificationEndpointsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionNotificationEndpointsRequest.to_json( + compute.ListRegionNotificationEndpointsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.NotificationEndpointList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionNotificationEndpointRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionNotificationEndpointRequest], + compute.NotificationEndpoint]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRegionNotificationEndpointRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionNotificationEndpointsRequest], + compute.NotificationEndpointList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'RegionNotificationEndpointsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/__init__.py new file mode 100644 index 000000000..561261a70 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/__init__.py @@ -0,0 +1,20 @@ +# -*- 
coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionOperationsClient + +__all__ = ( + 'RegionOperationsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/client.py new file mode 100644 index 000000000..9cec2dcca --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/client.py @@ -0,0 +1,735 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_operations import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionOperationsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionOperationsRestTransport + + +class RegionOperationsClientMeta(type): + """Metaclass for the RegionOperations client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionOperationsTransport]] + _transport_registry["rest"] = RegionOperationsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionOperationsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionOperationsClient(metaclass=RegionOperationsClientMeta):
+    """The RegionOperations API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionOperationsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionOperationsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionOperationsTransport: + """Returns the transport used by the client instance. + + Returns: + RegionOperationsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionOperationsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region operations client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionOperationsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionOperationsTransport): + # transport is a RegionOperationsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionOperationRequest, dict] = None, + *, + project: str = None, + region: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.DeleteRegionOperationResponse: + r"""Deletes the specified region-specific Operations + resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionOperationRequest, dict]): + The request object. A request message for + RegionOperations.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + delete. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.DeleteRegionOperationResponse: + A response message for + RegionOperations.Delete. See the method + description for details. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionOperationRequest): + request = compute.DeleteRegionOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionOperationRequest, dict] = None, + *, + project: str = None, + region: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Retrieves the specified region-specific Operations + resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionOperationRequest, dict]): + The request object. A request message for + RegionOperations.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + return. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionOperationRequest): + request = compute.GetRegionOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionOperationsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of Operation resources contained + within the specified region. 
+ + Args: + request (Union[google.cloud.compute_v1.types.ListRegionOperationsRequest, dict]): + The request object. A request message for + RegionOperations.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_operations.pagers.ListPager: + Contains a list of Operation + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionOperationsRequest): + request = compute.ListRegionOperationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def wait(self, + request: Union[compute.WaitRegionOperationRequest, dict] = None, + *, + project: str = None, + region: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Waits for the specified Operation resource to return as ``DONE`` + or for the request to approach the 2 minute deadline, and + retrieves the specified Operation resource. This method differs + from the ``GET`` method in that it waits for no more than the + default deadline (2 minutes) and then returns the current state + of the operation, which might be ``DONE`` or still in progress. + This method is called on a best-effort basis. Specifically: - In + uncommon cases, when the server is overloaded, the request might + return before the default deadline is reached, or might return + after zero seconds. - If the default deadline is reached, there + is no guarantee that the operation is actually done when the + method returns. Be prepared to retry if the operation is not + ``DONE``. + + Args: + request (Union[google.cloud.compute_v1.types.WaitRegionOperationRequest, dict]): + The request object. A request message for + RegionOperations.Wait. See the method description for + details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + return. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.WaitRegionOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.WaitRegionOperationRequest): + request = compute.WaitRegionOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.wait] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
class ListPager:
    """Pager for RegionOperations ``list`` requests.

    Thinly wraps an initial
    :class:`google.cloud.compute_v1.types.OperationList` and iterates its
    ``items`` field, transparently issuing further ``List`` requests when
    more pages exist. Attribute access falls through to the most recent
    response, so all the usual ``OperationList`` attributes remain
    available (only the latest page is retained).
    """
    def __init__(self,
            method: Callable[..., compute.OperationList],
            request: compute.ListRegionOperationsRequest,
            response: compute.OperationList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method originally called, used to fetch
                subsequent pages.
            request (google.cloud.compute_v1.types.ListRegionOperationsRequest):
                The initial request object (copied so page tokens can be
                advanced without mutating the caller's request).
            response (google.cloud.compute_v1.types.OperationList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with each page request as metadata.
        """
        self._method = method
        self._request = compute.ListRegionOperationsRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the most recent response page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.OperationList]:
        # Yield the current page, then keep fetching while the server
        # reports another page token.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[compute.Operation]:
        # Flatten the per-page ``items`` lists into one stream.
        return (item for page in self.pages for item in page.items)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionOperationsTransport +from .rest import RegionOperationsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionOperationsTransport]] +_transport_registry['rest'] = RegionOperationsRestTransport + +__all__ = ( + 'RegionOperationsTransport', + 'RegionOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/transports/base.py new file mode 100644 index 000000000..fd8e616e5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_operations/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Client-info metadata sent with every request; falls back to a bare
# ClientInfo when the distribution is not pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class RegionOperationsTransport(abc.ABC):
    """Abstract transport class for RegionOperations."""

    # OAuth scopes requested by default when none are supplied.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to; ':443' is
                appended when no port is given.
            credentials (Optional[google.auth.credentials.Credentials]):
                Authorization credentials to attach to requests; mutually
                exclusive with ``credentials_file``. When neither is
                given, credentials are ascertained from the environment.
            credentials_file (Optional[str]): A file loadable with
                :func:`google.auth.load_credentials_from_file`.
            scopes (Optional[Sequence[str]]): A list of scopes; defaults
                to ``AUTH_SCOPES``.
            quota_project_id (Optional[str]): An optional project to use
                for billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along
                with API requests.
            always_use_jwt_access (Optional[bool]): Whether self signed
                JWT should be used for service account credentials.
        """
        # Normalize the host to carry an explicit port (443 / HTTPS).
        self._host = host if ':' in host else host + ':443'

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        self._scopes = scopes

        # Resolve credentials: reject ambiguous input, then prefer an
        # explicit file, then an explicit object, then the environment.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id,
            )
        elif credentials is None:
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id)

        # Service-account credentials switch to self-signed JWTs when
        # requested and the installed google-auth supports it.
        if (always_use_jwt_access
                and isinstance(credentials, service_account.Credentials)
                and hasattr(service_account.Credentials, "with_always_use_jwt_access")):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_methods(self, client_info):
        # Precompute retry/timeout-wrapped versions of every RPC, keyed by
        # the transport's callable properties.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (self.delete, self.get, self.list, self.wait)
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionOperationRequest],
            Union[
                compute.DeleteRegionOperationResponse,
                Awaitable[compute.DeleteRegionOperationResponse],
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionOperationRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation],
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionOperationsRequest],
            Union[
                compute.OperationList,
                Awaitable[compute.OperationList],
            ]]:
        raise NotImplementedError()

    @property
    def wait(self) -> Callable[
            [compute.WaitRegionOperationRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation],
            ]]:
        raise NotImplementedError()


__all__ = (
    'RegionOperationsTransport',
)
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.types import compute

from .base import RegionOperationsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


# Client info advertising the REST (requests) transport in the user agent.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class RegionOperationsRestTransport(RegionOperationsTransport):
    """REST backend transport for RegionOperations.

    The RegionOperations API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials=None,
            credentials_file: str=None,
            scopes: Sequence[str]=None,
            client_cert_source_for_mtls: Callable[[
                ], Tuple[bytes, bytes]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Authorization credentials to attach to requests; when none
                are specified, credentials are ascertained from the
                environment.
            credentials_file (Optional[str]): A file with credentials that
                can be loaded with
                :func:`google.auth.load_credentials_from_file`.
            scopes (Optional(Sequence[str])): A list of scopes.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate to configure mutual TLS HTTP channel.
            quota_project_id (Optional[str]): An optional project to use
                for billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along
                with API requests.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT
                should be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers, "http" can be
                specified. (Currently unused: request URLs are built with
                "https" below.)
        """
        # FIX: previously only ``host``/``credentials`` were forwarded here,
        # so ``credentials_file``, ``scopes`` and ``quota_project_id`` were
        # accepted but silently ignored (the old TODO acknowledged this).
        # The base constructor honors all of them, so forward them.
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # HTTP session carrying the resolved credentials; optionally
        # configured for mutual TLS.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _delete(self,
            request: compute.DeleteRegionOperationRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.DeleteRegionOperationResponse:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteRegionOperationRequest):
                The request object. A request message for
                RegionOperations.Delete.
            retry (google.api_core.retry.Retry): Designation of what
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.DeleteRegionOperationResponse:
                A response message for RegionOperations.Delete.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: on any HTTP
                status >= 400.
        """
        http_options = [
            {
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/regions/{region}/operations/{operation}',
            },
        ]

        # (snake_case_name, camel_case_name) pairs of required fields.
        required_fields = [
            ("operation", "operation"),
            ("project", "project"),
            ("region", "region"),
        ]

        # Split the request into URI path variables and query parameters.
        request_kwargs = compute.DeleteRegionOperationRequest.to_dict(request)
        transcoded = path_template.transcode(http_options, **request_kwargs)
        uri = transcoded['uri']
        method = transcoded['method']

        # Jsonify the query params.
        query_params = json.loads(compute.DeleteRegionOperationRequest.to_json(
            compute.DeleteRegionOperationRequest(transcoded['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # Required fields whose value equals the proto default are dropped
        # by the to_json call above; restore them from the transcoded
        # request so the server still receives them.
        orig_query_params = transcoded["query_params"]
        for snake_name, camel_name in required_fields:
            if snake_name in orig_query_params and camel_name not in query_params:
                query_params[camel_name] = orig_query_params[snake_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # Surface HTTP errors as the appropriate GoogleAPICallError
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the parsed response.
        return compute.DeleteRegionOperationResponse.from_json(
            response.content,
            ignore_unknown_fields=True,
        )
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DeleteRegionOperationResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionOperationRequest): + The request object. A request message for + RegionOperations.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/operations/{operation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.GetRegionOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionOperationRequest.to_json( + compute.GetRegionOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionOperationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.OperationList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionOperationsRequest): + The request object. A request message for + RegionOperations.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.OperationList: + Contains a list of Operation + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/operations', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionOperationsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionOperationsRequest.to_json( + compute.ListRegionOperationsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.OperationList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _wait(self, + request: compute.WaitRegionOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the wait method over HTTP. + + Args: + request (~.compute.WaitRegionOperationRequest): + The request object. A request message for + RegionOperations.Wait. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/operations/{operation}/wait', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.WaitRegionOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.WaitRegionOperationRequest.to_json( + compute.WaitRegionOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionOperationRequest], + compute.DeleteRegionOperationResponse]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionOperationRequest], + compute.Operation]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListRegionOperationsRequest], + compute.OperationList]: + return self._list + @ property + def wait(self) -> Callable[ + [compute.WaitRegionOperationRequest], + compute.Operation]: + return self._wait + def close(self): + self._session.close() + + +__all__=( + 'RegionOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/__init__.py new file mode 100644 index 000000000..716ff5930 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import RegionSslCertificatesClient + +__all__ = ( + 'RegionSslCertificatesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/client.py new file mode 100644 index 000000000..217ba8bf5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/client.py @@ -0,0 +1,746 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_ssl_certificates import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionSslCertificatesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionSslCertificatesRestTransport + + +class RegionSslCertificatesClientMeta(type): + """Metaclass for the RegionSslCertificates client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionSslCertificatesTransport]] + _transport_registry["rest"] = RegionSslCertificatesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionSslCertificatesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionSslCertificatesClient(metaclass=RegionSslCertificatesClientMeta):
+    """The RegionSslCertificates API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionSslCertificatesClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionSslCertificatesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionSslCertificatesTransport: + """Returns the transport used by the client instance. + + Returns: + RegionSslCertificatesTransport: The transport used by the client + instance. 
+        """
+        return self._transport
+
+    @staticmethod
+    def common_billing_account_path(billing_account: str, ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(folder: str, ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(folder=folder, )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str,str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(organization: str, ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def 
parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, RegionSslCertificatesTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the region ssl certificates client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, RegionSslCertificatesTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionSslCertificatesTransport): + # transport is a RegionSslCertificatesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionSslCertificateRequest, dict] = None, + *, + project: str = None, + region: str = None, + ssl_certificate: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified SslCertificate resource in the + region. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionSslCertificateRequest, dict]): + The request object. A request message for + RegionSslCertificates.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_certificate (str): + Name of the SslCertificate resource + to delete. + + This corresponds to the ``ssl_certificate`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, ssl_certificate]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionSslCertificateRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionSslCertificateRequest): + request = compute.DeleteRegionSslCertificateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if ssl_certificate is not None: + request.ssl_certificate = ssl_certificate + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionSslCertificateRequest, dict] = None, + *, + project: str = None, + region: str = None, + ssl_certificate: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SslCertificate: + r"""Returns the specified SslCertificate resource in the + specified region. Get a list of available SSL + certificates by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionSslCertificateRequest, dict]): + The request object. A request message for + RegionSslCertificates.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_certificate (str): + Name of the SslCertificate resource + to return. 
+ + This corresponds to the ``ssl_certificate`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.SslCertificate: + Represents an SSL Certificate resource. Google Compute + Engine has two SSL Certificate resources: \* + [Global](/compute/docs/reference/rest/v1/sslCertificates) + \* + [Regional](/compute/docs/reference/rest/v1/regionSslCertificates) + The sslCertificates are used by: - external HTTPS load + balancers - SSL proxy load balancers The + regionSslCertificates are used by internal HTTPS load + balancers. Optionally, certificate file contents that + you upload can contain a set of up to five PEM-encoded + certificates. The API call creates an object + (sslCertificate) that holds this data. You can use SSL + keys and certificates to secure connections to a load + balancer. For more information, read Creating and using + SSL certificates, SSL certificates quotas and limits, + and Troubleshooting SSL certificates. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, ssl_certificate]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionSslCertificateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetRegionSslCertificateRequest): + request = compute.GetRegionSslCertificateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if ssl_certificate is not None: + request.ssl_certificate = ssl_certificate + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionSslCertificateRequest, dict] = None, + *, + project: str = None, + region: str = None, + ssl_certificate_resource: compute.SslCertificate = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a SslCertificate resource in the specified + project and region using the data included in the + request + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionSslCertificateRequest, dict]): + The request object. A request message for + RegionSslCertificates.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ ssl_certificate_resource (google.cloud.compute_v1.types.SslCertificate): + The body resource for this request + This corresponds to the ``ssl_certificate_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, ssl_certificate_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionSslCertificateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertRegionSslCertificateRequest): + request = compute.InsertRegionSslCertificateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if ssl_certificate_resource is not None: + request.ssl_certificate_resource = ssl_certificate_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionSslCertificatesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of SslCertificate resources + available to the specified project in the specified + region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionSslCertificatesRequest, dict]): + The request object. A request message for + RegionSslCertificates.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_ssl_certificates.pagers.ListPager: + Contains a list of SslCertificate + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionSslCertificatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionSslCertificatesRequest): + request = compute.ListRegionSslCertificatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. 
class ListPager:
    """A pager for iterating through ``list`` requests.

    Thinly wraps an initial
    :class:`google.cloud.compute_v1.types.SslCertificateList` and exposes
    ``__iter__`` over its ``items`` field. When a response carries a
    ``next_page_token``, iteration transparently issues further ``List``
    requests and continues through the ``items`` of each page.

    Attribute access is delegated to the most recently fetched response,
    so all :class:`google.cloud.compute_v1.types.SslCertificateList`
    attributes remain available on the pager.
    """
    def __init__(self,
            method: Callable[..., compute.SslCertificateList],
            request: compute.ListRegionSslCertificatesRequest,
            response: compute.SslCertificateList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListRegionSslCertificatesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.SslCertificateList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Copy into a mutable request so page_token can be advanced without
        # touching the caller's object.
        self._request = compute.ListRegionSslCertificatesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.SslCertificateList]:
        # Yield the page we already hold, then fetch while tokens remain.
        page = self._response
        yield page
        while page.next_page_token:
            self._request.page_token = page.next_page_token
            page = self._method(self._request, metadata=self._metadata)
            self._response = page
            yield page

    def __iter__(self) -> Iterator[compute.SslCertificate]:
        for page in self.pages:
            yield from page.items

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
+_transport_registry = OrderedDict() # type: Dict[str, Type[RegionSslCertificatesTransport]] +_transport_registry['rest'] = RegionSslCertificatesRestTransport + +__all__ = ( + 'RegionSslCertificatesTransport', + 'RegionSslCertificatesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/transports/base.py new file mode 100644 index 000000000..a0b1a5c18 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_ssl_certificates/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Derive the gapic version from the installed distribution. The fallback
# covers running generated code that was never pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class RegionSslCertificatesTransport(abc.ABC):
    """Abstract transport class for RegionSslCertificates.

    Concrete subclasses (e.g. the REST transport) implement the four RPC
    properties below; this base class handles credential resolution and
    method wrapping shared by all transports.
    """

    # OAuth scopes requested when credentials are resolved from the
    # environment or a credentials file.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # AUTH_SCOPES act only as defaults: explicit `scopes` win when the
        # auth library resolves credentials below.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                                credentials_file,
                                **scopes_kwargs,
                                quota_project_id=quota_project_id
                            )

        elif credentials is None:
            # Application Default Credentials.
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # The hasattr() guard keeps compatibility with older google-auth
        # versions that lack with_always_use_jwt_access.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. Wrapping attaches retry/timeout
        # defaults and user-agent metadata once, so per-call dispatch is a
        # plain dict lookup.
        self._wrapped_methods = {
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # The four RPCs of this service. Subclasses return a callable; the
    # Union return types allow both sync and async implementations.
    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionSslCertificateRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionSslCertificateRequest],
            Union[
                compute.SslCertificate,
                Awaitable[compute.SslCertificate]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionSslCertificateRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionSslCertificatesRequest],
            Union[
                compute.SslCertificateList,
                Awaitable[compute.SslCertificateList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'RegionSslCertificatesTransport',
)
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class RegionSslCertificatesRestTransport(RegionSslCertificatesTransport):
    """REST backend transport for RegionSslCertificates.

    The RegionSslCertificates API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials=None,
            credentials_file: str=None,
            scopes: Sequence[str]=None,
            client_cert_source_for_mtls: Callable[[
                ], Tuple[bytes, bytes]]=None,
            quota_project_id: Optional[str]=None,
            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool]=False,
            url_scheme: str='https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _issue_http_request(self,
            request,
            request_cls,
            http_options,
            required_fields,
            response_cls,
            timeout,
            metadata,
            body_cls=None,
            ):
        """Shared HTTP plumbing for all four RPCs of this transport.

        Transcodes ``request`` against ``http_options``, serializes the
        optional request body and query parameters, sends the call over the
        authorized session, raises on HTTP errors, and parses the response
        into ``response_cls``. ``required_fields`` is a list of
        ``(snake_case, camelCase)`` pairs restored into the query string
        when proto serialization drops default-valued required fields.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body, when this RPC carries one.
        body = None
        if body_cls is not None:
            body = body_cls.to_json(
                body_cls(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False
            )

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params and camel_case_name not in query_params:
                query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        send_kwargs = {
            'timeout': timeout,
            'headers': headers,
            'params': rest_helpers.flatten_query_params(query_params),
        }
        if body is not None:
            send_kwargs['data'] = body
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            **send_kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _delete(self,
            request: compute.DeleteRegionSslCertificateRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteRegionSslCertificateRequest):
                The request object. A request message for
                RegionSslCertificates.Delete. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional, or
                zonal), used to manage asynchronous API requests.
        """
        return self._issue_http_request(
            request,
            compute.DeleteRegionSslCertificateRequest,
            [
                {
                    'method': 'delete',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/sslCertificates/{ssl_certificate}',
                },
            ],
            [
                # (snake_case_name, camel_case_name)
                ("project", "project"),
                ("region", "region"),
                ("ssl_certificate", "sslCertificate"),
            ],
            compute.Operation,
            timeout,
            metadata,
        )

    def _get(self,
            request: compute.GetRegionSslCertificateRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.SslCertificate:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetRegionSslCertificateRequest):
                The request object. A request message for
                RegionSslCertificates.Get. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.SslCertificate:
                Represents an SSL Certificate resource used by load
                balancers to secure connections.
        """
        return self._issue_http_request(
            request,
            compute.GetRegionSslCertificateRequest,
            [
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/sslCertificates/{ssl_certificate}',
                },
            ],
            [
                ("project", "project"),
                ("region", "region"),
                ("ssl_certificate", "sslCertificate"),
            ],
            compute.SslCertificate,
            timeout,
            metadata,
        )

    def _insert(self,
            request: compute.InsertRegionSslCertificateRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertRegionSslCertificateRequest):
                The request object. A request message for
                RegionSslCertificates.Insert. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional, or
                zonal), used to manage asynchronous API requests.
        """
        return self._issue_http_request(
            request,
            compute.InsertRegionSslCertificateRequest,
            [
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/sslCertificates',
                    'body': 'ssl_certificate_resource',
                },
            ],
            [
                ("project", "project"),
                ("region", "region"),
            ],
            compute.Operation,
            timeout,
            metadata,
            body_cls=compute.SslCertificate,
        )

    def _list(self,
            request: compute.ListRegionSslCertificatesRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: float=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.SslCertificateList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListRegionSslCertificatesRequest):
                The request object. A request message for
                RegionSslCertificates.List. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.SslCertificateList:
                Contains a list of SslCertificate
                resources.
        """
        return self._issue_http_request(
            request,
            compute.ListRegionSslCertificatesRequest,
            [
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/sslCertificates',
                },
            ],
            [
                ("project", "project"),
                ("region", "region"),
            ],
            compute.SslCertificateList,
            timeout,
            metadata,
        )

    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionSslCertificateRequest],
            compute.Operation]:
        return self._delete

    @property
    def get(self) -> Callable[
            [compute.GetRegionSslCertificateRequest],
            compute.SslCertificate]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionSslCertificateRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListRegionSslCertificatesRequest],
            compute.SslCertificateList]:
        return self._list

    def close(self):
        self._session.close()


__all__=(
    'RegionSslCertificatesRestTransport',
)
Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionTargetHttpProxiesClient + +__all__ = ( + 'RegionTargetHttpProxiesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/client.py new file mode 100644 index 000000000..945d77f04 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/client.py @@ -0,0 +1,848 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_target_http_proxies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionTargetHttpProxiesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionTargetHttpProxiesRestTransport + + +class RegionTargetHttpProxiesClientMeta(type): + """Metaclass for the RegionTargetHttpProxies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionTargetHttpProxiesTransport]] + _transport_registry["rest"] = RegionTargetHttpProxiesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionTargetHttpProxiesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionTargetHttpProxiesClient(metaclass=RegionTargetHttpProxiesClientMeta):
+    """The RegionTargetHttpProxies API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionTargetHttpProxiesClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionTargetHttpProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionTargetHttpProxiesTransport: + """Returns the transport used by the client instance. + + Returns: + RegionTargetHttpProxiesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, RegionTargetHttpProxiesTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the region target http proxies client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, RegionTargetHttpProxiesTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionTargetHttpProxiesTransport): + # transport is a RegionTargetHttpProxiesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_http_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetHttpProxy resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionTargetHttpProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpProxies.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy (str): + Name of the TargetHttpProxy resource + to delete. + + This corresponds to the ``target_http_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_http_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionTargetHttpProxyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionTargetHttpProxyRequest): + request = compute.DeleteRegionTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_http_proxy is not None: + request.target_http_proxy = target_http_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_http_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetHttpProxy: + r"""Returns the specified TargetHttpProxy resource in the + specified region. Gets a list of available target HTTP + proxies by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionTargetHttpProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpProxies.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy (str): + Name of the TargetHttpProxy resource + to return. 
+ + This corresponds to the ``target_http_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetHttpProxy: + Represents a Target HTTP Proxy resource. Google Compute + Engine has two Target HTTP Proxy resources: \* + [Global](/compute/docs/reference/rest/v1/targetHttpProxies) + \* + [Regional](/compute/docs/reference/rest/v1/regionTargetHttpProxies) + A target HTTP proxy is a component of GCP HTTP load + balancers. \* targetHttpProxies are used by external + HTTP load balancers and Traffic Director. \* + regionTargetHttpProxies are used by internal HTTP load + balancers. Forwarding rules reference a target HTTP + proxy, and the target proxy then references a URL map. + For more information, read Using Target Proxies and + Forwarding rule concepts. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_http_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionTargetHttpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionTargetHttpProxyRequest): + request = compute.GetRegionTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if target_http_proxy is not None: + request.target_http_proxy = target_http_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_http_proxy_resource: compute.TargetHttpProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetHttpProxy resource in the specified + project and region using the data included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionTargetHttpProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpProxies.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy_resource (google.cloud.compute_v1.types.TargetHttpProxy): + The body resource for this request + This corresponds to the ``target_http_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_http_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionTargetHttpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionTargetHttpProxyRequest): + request = compute.InsertRegionTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if target_http_proxy_resource is not None: + request.target_http_proxy_resource = target_http_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionTargetHttpProxiesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of TargetHttpProxy resources + available to the specified project in the specified + region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionTargetHttpProxiesRequest, dict]): + The request object. A request message for + RegionTargetHttpProxies.List. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_target_http_proxies.pagers.ListPager: + A list of TargetHttpProxy resources. 
+ Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionTargetHttpProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionTargetHttpProxiesRequest): + request = compute.ListRegionTargetHttpProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_url_map(self, + request: Union[compute.SetUrlMapRegionTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_http_proxy: str = None, + url_map_reference_resource: compute.UrlMapReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the URL map for TargetHttpProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetUrlMapRegionTargetHttpProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpProxies.SetUrlMap. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy (str): + Name of the TargetHttpProxy to set a + URL map for. + + This corresponds to the ``target_http_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + This corresponds to the ``url_map_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_http_proxy, url_map_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetUrlMapRegionTargetHttpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetUrlMapRegionTargetHttpProxyRequest): + request = compute.SetUrlMapRegionTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_http_proxy is not None: + request.target_http_proxy = target_http_proxy + if url_map_reference_resource is not None: + request.url_map_reference_resource = url_map_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.set_url_map] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionTargetHttpProxiesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/pagers.py new file mode 100644 index 000000000..cd19d61c5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetHttpProxyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.TargetHttpProxyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetHttpProxyList], + request: compute.ListRegionTargetHttpProxiesRequest, + response: compute.TargetHttpProxyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionTargetHttpProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetHttpProxyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListRegionTargetHttpProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetHttpProxyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetHttpProxy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/__init__.py new file mode 100644 index 000000000..072defd33 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionTargetHttpProxiesTransport +from .rest import RegionTargetHttpProxiesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[RegionTargetHttpProxiesTransport]] +_transport_registry['rest'] = RegionTargetHttpProxiesRestTransport + +__all__ = ( + 'RegionTargetHttpProxiesTransport', + 'RegionTargetHttpProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/base.py new file mode 100644 index 000000000..d4606da74 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionTargetHttpProxiesTransport(abc.ABC): + """Abstract transport class for RegionTargetHttpProxies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.set_url_map: gapic_v1.method.wrap_method( + self.set_url_map, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionTargetHttpProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionTargetHttpProxyRequest], + Union[ + compute.TargetHttpProxy, + Awaitable[compute.TargetHttpProxy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionTargetHttpProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionTargetHttpProxiesRequest], + Union[ + compute.TargetHttpProxyList, + Awaitable[compute.TargetHttpProxyList] + ]]: + raise NotImplementedError() + + @property + def set_url_map(self) -> Callable[ + [compute.SetUrlMapRegionTargetHttpProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionTargetHttpProxiesTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/rest.py new file mode 100644 index 000000000..ed8c9a8f1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_http_proxies/transports/rest.py @@ -0,0 +1,684 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionTargetHttpProxiesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionTargetHttpProxiesRestTransport(RegionTargetHttpProxiesTransport): + """REST backend transport for RegionTargetHttpProxies. + + The RegionTargetHttpProxies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. 
It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRegionTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionTargetHttpProxyRequest): + The request object. A request message for + RegionTargetHttpProxies.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_http_proxy", + "targetHttpProxy" + ), + ] + + request_kwargs = compute.DeleteRegionTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionTargetHttpProxyRequest.to_json( + compute.DeleteRegionTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpProxy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionTargetHttpProxyRequest): + The request object. A request message for + RegionTargetHttpProxies.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetHttpProxy: + Represents a Target HTTP Proxy resource. Google Compute + Engine has two Target HTTP Proxy resources: \* + `Global `__ + \* + `Regional `__ + A target HTTP proxy is a component of GCP HTTP load + balancers. \* targetHttpProxies are used by external + HTTP load balancers and Traffic Director. 
\* + regionTargetHttpProxies are used by internal HTTP load + balancers. Forwarding rules reference a target HTTP + proxy, and the target proxy then references a URL map. + For more information, read Using Target Proxies and + Forwarding rule concepts. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_http_proxy", + "targetHttpProxy" + ), + ] + + request_kwargs = compute.GetRegionTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionTargetHttpProxyRequest.to_json( + compute.GetRegionTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpProxy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRegionTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRegionTargetHttpProxyRequest): + The request object. A request message for + RegionTargetHttpProxies.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpProxies', + 'body': 'target_http_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRegionTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetHttpProxy.to_json( + compute.TargetHttpProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRegionTargetHttpProxyRequest.to_json( + compute.InsertRegionTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRegionTargetHttpProxiesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpProxyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRegionTargetHttpProxiesRequest): + The request object. A request message for + RegionTargetHttpProxies.List. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetHttpProxyList: + A list of TargetHttpProxy resources. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpProxies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRegionTargetHttpProxiesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRegionTargetHttpProxiesRequest.to_json( + compute.ListRegionTargetHttpProxiesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpProxyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_url_map(self, + request: compute.SetUrlMapRegionTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set url map method over HTTP. + + Args: + request (~.compute.SetUrlMapRegionTargetHttpProxyRequest): + The request object. A request message for + RegionTargetHttpProxies.SetUrlMap. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}/setUrlMap', + 'body': 'url_map_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_http_proxy", + "targetHttpProxy" + ), + ] + + request_kwargs = compute.SetUrlMapRegionTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMapReference.to_json( + compute.UrlMapReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetUrlMapRegionTargetHttpProxyRequest.to_json( + compute.SetUrlMapRegionTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionTargetHttpProxyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionTargetHttpProxyRequest], + compute.TargetHttpProxy]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRegionTargetHttpProxyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionTargetHttpProxiesRequest], + compute.TargetHttpProxyList]: + return self._list + @ property + def set_url_map(self) -> Callable[ + [compute.SetUrlMapRegionTargetHttpProxyRequest], + compute.Operation]: + return self._set_url_map + def close(self): + self._session.close() + + +__all__=( + 'RegionTargetHttpProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_https_proxies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_https_proxies/__init__.py new file mode 100644 index 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Package entry point for the RegionTargetHttpsProxies service:
# re-exports the client class so callers can import it from the package root.
from .client import RegionTargetHttpsProxiesClient

__all__ = (
    'RegionTargetHttpsProxiesClient',
)
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.services.region_target_https_proxies import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import RegionTargetHttpsProxiesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import RegionTargetHttpsProxiesRestTransport


class RegionTargetHttpsProxiesClientMeta(type):
    """Metaclass for the RegionTargetHttpsProxies client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[RegionTargetHttpsProxiesTransport]]
    _transport_registry["rest"] = RegionTargetHttpsProxiesRestTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[RegionTargetHttpsProxiesTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class RegionTargetHttpsProxiesClient(metaclass=RegionTargetHttpsProxiesClientMeta):
    """The RegionTargetHttpsProxies API."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # FIX: the named groups had been stripped from this pattern
        # (e.g. ``(?P[^.]+)``), which is invalid regex syntax and would raise
        # ``re.error`` at class-definition time. The group names are restored
        # from the ``m.groups()`` unpacking below.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            RegionTargetHttpsProxiesClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionTargetHttpsProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionTargetHttpsProxiesTransport: + """Returns the transport used by the client instance. + + Returns: + RegionTargetHttpsProxiesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionTargetHttpsProxiesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the region target https proxies client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionTargetHttpsProxiesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionTargetHttpsProxiesTransport): + # transport is a RegionTargetHttpsProxiesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_https_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetHttpsProxy resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionTargetHttpsProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpsProxies.Delete. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + to delete. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_https_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionTargetHttpsProxyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionTargetHttpsProxyRequest): + request = compute.DeleteRegionTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRegionTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_https_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetHttpsProxy: + r"""Returns the specified TargetHttpsProxy resource in + the specified region. Gets a list of available target + HTTP proxies by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionTargetHttpsProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpsProxies.Get. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_https_proxy (str): + Name of the TargetHttpsProxy resource + to return. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetHttpsProxy: + Represents a Target HTTPS Proxy resource. Google Compute + Engine has two Target HTTPS Proxy resources: \* + [Global](/compute/docs/reference/rest/v1/targetHttpsProxies) + \* + [Regional](/compute/docs/reference/rest/v1/regionTargetHttpsProxies) + A target HTTPS proxy is a component of GCP HTTPS load + balancers. \* targetHttpsProxies are used by external + HTTPS load balancers. \* regionTargetHttpsProxies are + used by internal HTTPS load balancers. Forwarding rules + reference a target HTTPS proxy, and the target proxy + then references a URL map. For more information, read + Using Target Proxies and Forwarding rule concepts. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_https_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetRegionTargetHttpsProxyRequest): + request = compute.GetRegionTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRegionTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_https_proxy_resource: compute.TargetHttpsProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetHttpsProxy resource in the specified + project and region using the data included in the + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionTargetHttpsProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpsProxies.Insert. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_https_proxy_resource (google.cloud.compute_v1.types.TargetHttpsProxy): + The body resource for this request + This corresponds to the ``target_https_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_https_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertRegionTargetHttpsProxyRequest): + request = compute.InsertRegionTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_https_proxy_resource is not None: + request.target_https_proxy_resource = target_https_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionTargetHttpsProxiesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of TargetHttpsProxy resources + available to the specified project in the specified + region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionTargetHttpsProxiesRequest, dict]): + The request object. A request message for + RegionTargetHttpsProxies.List. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_target_https_proxies.pagers.ListPager: + Contains a list of TargetHttpsProxy + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionTargetHttpsProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionTargetHttpsProxiesRequest): + request = compute.ListRegionTargetHttpsProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_ssl_certificates(self, + request: Union[compute.SetSslCertificatesRegionTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_https_proxy: str = None, + region_target_https_proxies_set_ssl_certificates_request_resource: compute.RegionTargetHttpsProxiesSetSslCertificatesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Replaces SslCertificates for TargetHttpsProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetSslCertificatesRegionTargetHttpsProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpsProxies.SetSslCertificates. See the + method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + to set an SslCertificates resource for. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_target_https_proxies_set_ssl_certificates_request_resource (google.cloud.compute_v1.types.RegionTargetHttpsProxiesSetSslCertificatesRequest): + The body resource for this request + This corresponds to the ``region_target_https_proxies_set_ssl_certificates_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_https_proxy, region_target_https_proxies_set_ssl_certificates_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetSslCertificatesRegionTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetSslCertificatesRegionTargetHttpsProxyRequest): + request = compute.SetSslCertificatesRegionTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + if region_target_https_proxies_set_ssl_certificates_request_resource is not None: + request.region_target_https_proxies_set_ssl_certificates_request_resource = region_target_https_proxies_set_ssl_certificates_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_ssl_certificates] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_url_map(self, + request: Union[compute.SetUrlMapRegionTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_https_proxy: str = None, + url_map_reference_resource: compute.UrlMapReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the URL map for TargetHttpsProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetUrlMapRegionTargetHttpsProxyRequest, dict]): + The request object. A request message for + RegionTargetHttpsProxies.SetUrlMap. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy to set a + URL map for. 
+ + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + This corresponds to the ``url_map_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_https_proxy, url_map_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetUrlMapRegionTargetHttpsProxyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetUrlMapRegionTargetHttpsProxyRequest): + request = compute.SetUrlMapRegionTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + if url_map_reference_resource is not None: + request.url_map_reference_resource = url_map_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_url_map] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionTargetHttpsProxiesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_https_proxies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_https_proxies/pagers.py new file mode 100644 index 000000000..fe2c4ddaa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_https_proxies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetHttpsProxyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.TargetHttpsProxyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetHttpsProxyList], + request: compute.ListRegionTargetHttpsProxiesRequest, + response: compute.TargetHttpsProxyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionTargetHttpsProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetHttpsProxyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionTargetHttpsProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetHttpsProxyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetHttpsProxy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_https_proxies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_target_https_proxies/transports/__init__.py new file mode 100644 index 000000000..d5e77ed66 --- /dev/null +++ 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Transport package for the RegionTargetHttpsProxies service: exposes the
# abstract base transport and the concrete REST transport, plus a registry
# mapping transport names to classes.
from collections import OrderedDict
from typing import Dict, Type

from .base import RegionTargetHttpsProxiesTransport
from .rest import RegionTargetHttpsProxiesRestTransport


# Compile a registry of transports.
# Keys are the names accepted by the client's ``transport=`` argument;
# 'rest' is currently the only transport generated for this service.
_transport_registry = OrderedDict()  # type: Dict[str, Type[RegionTargetHttpsProxiesTransport]]
_transport_registry['rest'] = RegionTargetHttpsProxiesRestTransport

__all__ = (
    'RegionTargetHttpsProxiesTransport',
    'RegionTargetHttpsProxiesRestTransport',
)
class RegionTargetHttpsProxiesTransport(abc.ABC):
    """Abstract transport class for RegionTargetHttpsProxies.

    Handles constructor-time credential resolution shared by all concrete
    transports, and declares one abstract property per RPC; each property
    must return a callable that actually sends the request (see the REST
    implementation in ``transports/rest.py``).
    """

    # OAuth 2.0 scopes requested when credentials are resolved from the
    # environment or loaded from a credentials file.
    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'
    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # NOTE: **kwargs is accepted (and ignored here) so subclasses can
        # pass through extra constructor arguments.
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults. Explicit credentials and a credentials file are
        # mutually exclusive.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )

        elif credentials is None:
            # Fall back to Application Default Credentials.
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to use self signed JWT.
        # (The hasattr guard keeps this compatible with older google-auth
        # versions that lack with_always_use_jwt_access.)
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. Wrapping adds retry/timeout and
        # user-agent metadata handling around each RPC property below.
        self._wrapped_methods = {
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_ssl_certificates: gapic_v1.method.wrap_method(
                self.set_ssl_certificates,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_url_map: gapic_v1.method.wrap_method(
                self.set_url_map,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # Abstract RPC surface: each property returns a callable that performs
    # the corresponding request. Concrete transports must override these.
    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionTargetHttpsProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionTargetHttpsProxyRequest],
            Union[
                compute.TargetHttpsProxy,
                Awaitable[compute.TargetHttpsProxy]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionTargetHttpsProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionTargetHttpsProxiesRequest],
            Union[
                compute.TargetHttpsProxyList,
                Awaitable[compute.TargetHttpsProxyList]
            ]]:
        raise NotImplementedError()

    @property
    def set_ssl_certificates(self) -> Callable[
            [compute.SetSslCertificatesRegionTargetHttpsProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def set_url_map(self) -> Callable[
            [compute.SetUrlMapRegionTargetHttpsProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()


__all__ = (
    'RegionTargetHttpsProxiesTransport',
)
google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RegionTargetHttpsProxiesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RegionTargetHttpsProxiesRestTransport(RegionTargetHttpsProxiesTransport): + """REST backend transport for RegionTargetHttpsProxies. + + The RegionTargetHttpsProxies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRegionTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRegionTargetHttpsProxyRequest): + The request object. A request message for + RegionTargetHttpsProxies.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.DeleteRegionTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRegionTargetHttpsProxyRequest.to_json( + compute.DeleteRegionTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRegionTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpsProxy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRegionTargetHttpsProxyRequest): + The request object. A request message for + RegionTargetHttpsProxies.Get. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetHttpsProxy: + Represents a Target HTTPS Proxy resource. Google Compute + Engine has two Target HTTPS Proxy resources: \* + `Global `__ + \* + `Regional `__ + A target HTTPS proxy is a component of GCP HTTPS load + balancers. \* targetHttpsProxies are used by external + HTTPS load balancers. \* regionTargetHttpsProxies are + used by internal HTTPS load balancers. Forwarding rules + reference a target HTTPS proxy, and the target proxy + then references a URL map. For more information, read + Using Target Proxies and Forwarding rule concepts. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.GetRegionTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRegionTargetHttpsProxyRequest.to_json( + compute.GetRegionTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
    def _insert(self,
            request: compute.InsertRegionTargetHttpsProxyRequest, *,
            retry: OptionalRetry=gapic_v1.method.DEFAULT,
            timeout: Optional[float]=None,
            metadata: Sequence[Tuple[str, str]]=(),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertRegionTargetHttpsProxyRequest):
                The request object. A request message for
                RegionTargetHttpsProxies.Insert. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine
                has three Operation resources: \*
                `Global <https://cloud.google.com/compute/docs/reference/rest/v1/globalOperations>`__
                \*
                `Regional <https://cloud.google.com/compute/docs/reference/rest/v1/regionOperations>`__
                \*
                `Zonal <https://cloud.google.com/compute/docs/reference/rest/v1/zoneOperations>`__
                You can use an operation resource to manage asynchronous
                API requests. For more information, read Handling API
                responses. Operations can be global, regional or zonal.
                - For global operations, use the ``globalOperations``
                resource. - For regional operations, use the
                ``regionOperations`` resource. - For zonal operations,
                use the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal Resources.
        """
        # NOTE(review): ``retry`` is accepted but never referenced below;
        # retries appear to be handled by the gapic_v1.method wrapper layer
        # (_prep_wrapped_messages) — confirm against the generator.

        # HTTP transcoding rule(s): POST with the proxy resource as body.
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies',
                'body': 'target_https_proxy_resource',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            ("project", "project"),
            ("region", "region"),
        ]

        # Split the request into URI path variables, body, and query params.
        request_kwargs = compute.InsertRegionTargetHttpsProxyRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body (the TargetHttpsProxy resource itself).
        body = compute.TargetHttpsProxy.to_json(
            compute.TargetHttpsProxy(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params (round-trip through the proto so field
        # names are converted to their camelCase JSON form).
        query_params = json.loads(compute.InsertRegionTargetHttpsProxyRequest.to_json(
            compute.InsertRegionTargetHttpsProxyRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are omitted), so restore it
        # from the pre-jsonify dict.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request over the authorized HTTP session.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
        # subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response parsed back into the proto type.
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpsProxyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_ssl_certificates(self, + request: compute.SetSslCertificatesRegionTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set ssl certificates method over HTTP. + + Args: + request (~.compute.SetSslCertificatesRegionTargetHttpsProxyRequest): + The request object. A request message for + RegionTargetHttpsProxies.SetSslCertificates. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}/setSslCertificates', + 'body': 'region_target_https_proxies_set_ssl_certificates_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.SetSslCertificatesRegionTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionTargetHttpsProxiesSetSslCertificatesRequest.to_json( + compute.RegionTargetHttpsProxiesSetSslCertificatesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetSslCertificatesRegionTargetHttpsProxyRequest.to_json( + compute.SetSslCertificatesRegionTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_url_map(self, + request: compute.SetUrlMapRegionTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set url map method over HTTP. + + Args: + request (~.compute.SetUrlMapRegionTargetHttpsProxyRequest): + The request object. A request message for + RegionTargetHttpsProxies.SetUrlMap. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}/setUrlMap', + 'body': 'url_map_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.SetUrlMapRegionTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMapReference.to_json( + compute.UrlMapReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetUrlMapRegionTargetHttpsProxyRequest.to_json( + compute.SetUrlMapRegionTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRegionTargetHttpsProxyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRegionTargetHttpsProxyRequest], + compute.TargetHttpsProxy]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRegionTargetHttpsProxyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRegionTargetHttpsProxiesRequest], + compute.TargetHttpsProxyList]: + return self._list + @ property + def set_ssl_certificates(self) -> Callable[ + [compute.SetSslCertificatesRegionTargetHttpsProxyRequest], + compute.Operation]: + return self._set_ssl_certificates + @ property + def set_url_map(self) -> Callable[ + [compute.SetUrlMapRegionTargetHttpsProxyRequest], + compute.Operation]: + return self._set_url_map + def close(self): + self._session.close() + + +__all__=( + 'RegionTargetHttpsProxiesRestTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/__init__.py new file mode 100644 index 000000000..b869b700b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionUrlMapsClient + +__all__ = ( + 'RegionUrlMapsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/client.py new file mode 100644 index 000000000..fead626bd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/client.py @@ -0,0 +1,1056 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.region_url_maps import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionUrlMapsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionUrlMapsRestTransport + + +class RegionUrlMapsClientMeta(type): + """Metaclass for the RegionUrlMaps client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionUrlMapsTransport]] + _transport_registry["rest"] = RegionUrlMapsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionUrlMapsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class RegionUrlMapsClient(metaclass=RegionUrlMapsClientMeta):
+    """The RegionUrlMaps API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            RegionUrlMapsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionUrlMapsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionUrlMapsTransport: + """Returns the transport used by the client instance. + + Returns: + RegionUrlMapsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
 string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, RegionUrlMapsTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the region url maps client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, RegionUrlMapsTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionUrlMapsTransport): + # transport is a RegionUrlMapsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRegionUrlMapRequest, dict] = None, + *, + project: str = None, + region: str = None, + url_map: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified UrlMap resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRegionUrlMapRequest, dict]): + The request object. A request message for + RegionUrlMaps.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to + delete. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, url_map]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRegionUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRegionUrlMapRequest): + request = compute.DeleteRegionUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if url_map is not None: + request.url_map = url_map + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetRegionUrlMapRequest, dict] = None, + *, + project: str = None, + region: str = None, + url_map: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.UrlMap: + r"""Returns the specified UrlMap resource. Gets a list of + available URL maps by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionUrlMapRequest, dict]): + The request object. A request message for + RegionUrlMaps.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to + return. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.UrlMap: + Represents a URL Map resource. Google Compute Engine has + two URL Map resources: \* + [Global](/compute/docs/reference/rest/v1/urlMaps) \* + [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) + A URL map resource is a component of certain types of + GCP load balancers and Traffic Director. \* urlMaps are + used by external HTTP(S) load balancers and Traffic + Director. \* regionUrlMaps are used by internal HTTP(S) + load balancers. 
For a list of supported URL map features + by load balancer type, see the Load balancing features: + Routing and traffic management table. For a list of + supported URL map features for Traffic Director, see the + Traffic Director features: Routing and traffic + management table. This resource defines mappings from + host names and URL paths to either a backend service or + a backend bucket. To use the global urlMaps resource, + the backend service must have a loadBalancingScheme of + either EXTERNAL or INTERNAL_SELF_MANAGED. To use the + regionUrlMaps resource, the backend service must have a + loadBalancingScheme of INTERNAL_MANAGED. For more + information, read URL Map Concepts. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, url_map]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionUrlMapRequest): + request = compute.GetRegionUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if url_map is not None: + request.url_map = url_map + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertRegionUrlMapRequest, dict] = None, + *, + project: str = None, + region: str = None, + url_map_resource: compute.UrlMap = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a UrlMap resource in the specified project + using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRegionUrlMapRequest, dict]): + The request object. A request message for + RegionUrlMaps.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + This corresponds to the ``url_map_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, url_map_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRegionUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRegionUrlMapRequest): + request = compute.InsertRegionUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if url_map_resource is not None: + request.url_map_resource = url_map_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def list(self, + request: Union[compute.ListRegionUrlMapsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of UrlMap resources available to + the specified project in the specified region. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionUrlMapsRequest, dict]): + The request object. A request message for + RegionUrlMaps.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.region_url_maps.pagers.ListPager: + Contains a list of UrlMap resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionUrlMapsRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionUrlMapsRequest): + request = compute.ListRegionUrlMapsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchRegionUrlMapRequest, dict] = None, + *, + project: str = None, + region: str = None, + url_map: str = None, + url_map_resource: compute.UrlMap = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified UrlMap resource with the data + included in the request. This method supports PATCH + semantics and uses JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchRegionUrlMapRequest, dict]): + The request object. A request message for + RegionUrlMaps.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to patch. + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + This corresponds to the ``url_map_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, url_map, url_map_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchRegionUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchRegionUrlMapRequest): + request = compute.PatchRegionUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if url_map is not None: + request.url_map = url_map + if url_map_resource is not None: + request.url_map_resource = url_map_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateRegionUrlMapRequest, dict] = None, + *, + project: str = None, + region: str = None, + url_map: str = None, + url_map_resource: compute.UrlMap = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified UrlMap resource with the data + included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateRegionUrlMapRequest, dict]): + The request object. A request message for + RegionUrlMaps.Update. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to + update. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + This corresponds to the ``url_map_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, url_map, url_map_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateRegionUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateRegionUrlMapRequest): + request = compute.UpdateRegionUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if url_map is not None: + request.url_map = url_map + if url_map_resource is not None: + request.url_map_resource = url_map_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def validate(self, + request: Union[compute.ValidateRegionUrlMapRequest, dict] = None, + *, + project: str = None, + region: str = None, + url_map: str = None, + region_url_maps_validate_request_resource: compute.RegionUrlMapsValidateRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.UrlMapsValidateResponse: + r"""Runs static validation for the UrlMap. In particular, + the tests of the provided UrlMap will be run. Calling + this method does NOT create the UrlMap. + + Args: + request (Union[google.cloud.compute_v1.types.ValidateRegionUrlMapRequest, dict]): + The request object. A request message for + RegionUrlMaps.Validate. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to be + validated as. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_url_maps_validate_request_resource (google.cloud.compute_v1.types.RegionUrlMapsValidateRequest): + The body resource for this request + This corresponds to the ``region_url_maps_validate_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.UrlMapsValidateResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, url_map, region_url_maps_validate_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ValidateRegionUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.ValidateRegionUrlMapRequest): + request = compute.ValidateRegionUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if url_map is not None: + request.url_map = url_map + if region_url_maps_validate_request_resource is not None: + request.region_url_maps_validate_request_resource = region_url_maps_validate_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.validate] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionUrlMapsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/pagers.py new file mode 100644 index 000000000..38aae40d9 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.UrlMapList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.UrlMapList` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.UrlMapList], + request: compute.ListRegionUrlMapsRequest, + response: compute.UrlMapList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionUrlMapsRequest): + The initial request object. + response (google.cloud.compute_v1.types.UrlMapList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionUrlMapsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.UrlMapList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.UrlMap]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/__init__.py new file mode 100644 index 000000000..6fddc68ec --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance 
with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RegionUrlMapsTransport +from .rest import RegionUrlMapsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RegionUrlMapsTransport]] +_transport_registry['rest'] = RegionUrlMapsRestTransport + +__all__ = ( + 'RegionUrlMapsTransport', + 'RegionUrlMapsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/base.py new file mode 100644 index 000000000..99d0deda3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/base.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RegionUrlMapsTransport(abc.ABC): + """Abstract transport class for RegionUrlMaps.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + self.validate: gapic_v1.method.wrap_method( + self.validate, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRegionUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRegionUrlMapRequest], + Union[ + compute.UrlMap, + Awaitable[compute.UrlMap] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRegionUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRegionUrlMapsRequest], + Union[ + compute.UrlMapList, + Awaitable[compute.UrlMapList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchRegionUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateRegionUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def validate(self) -> Callable[ + [compute.ValidateRegionUrlMapRequest], + Union[ + compute.UrlMapsValidateResponse, + Awaitable[compute.UrlMapsValidateResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RegionUrlMapsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/rest.py new file mode 100644 index 000000000..470fe2c13 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/region_url_maps/transports/rest.py @@ -0,0 +1,921 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import json

from google.auth.transport.requests import AuthorizedSession  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.types import compute

from .base import RegionUrlMapsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class RegionUrlMapsRestTransport(RegionUrlMapsTransport):
    """REST backend transport for RegionUrlMaps.

    The RegionUrlMaps API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1.
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also*
        # be set on the credentials object.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_rest(self,
                   request,
                   *,
                   http_options,
                   required_fields,
                   request_cls,
                   response_cls,
                   body_cls=None,
                   timeout: Optional[float] = None,
                   metadata: Sequence[Tuple[str, str]] = (),
                   ):
        """Shared HTTP plumbing for every RPC on this transport.

        Transcodes ``request`` against ``http_options``, serializes the
        optional request body, restores required query parameters that
        proto JSON serialization drops when they hold default values,
        sends the request over the authorized session and deserializes
        the response.

        Args:
            request: The proto-plus request message instance.
            http_options: Transcoding rules (method/uri/body) for this RPC.
            required_fields: ``(snake_case, camelCase)`` pairs of required
                fields that must survive into the query string.
            request_cls: The proto-plus class of ``request``.
            response_cls: The proto-plus class to parse the response into.
            body_cls: Proto-plus class of the request body, or ``None``
                when the RPC carries no body.
            timeout: The timeout for this request.
            metadata: Strings which should be sent along with the request
                as headers.

        Returns:
            An instance of ``response_cls`` parsed from the response JSON.

        Raises:
            google.api_core.exceptions.GoogleAPICallError: If the response
                status code is 400 or greater.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body, if this RPC has one.
        body = None
        if body_cls is not None:
            body = body_cls.to_json(
                body_cls(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        send_kwargs = {
            'timeout': timeout,
            'headers': headers,
            'params': rest_helpers.flatten_query_params(query_params),
        }
        if body is not None:
            send_kwargs['data'] = body
        response = getattr(self._session, method)(
            # TODO: replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            **send_kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _delete(self,
            request: compute.DeleteRegionUrlMapRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteRegionUrlMapRequest):
                The request object. A request message for
                RegionUrlMaps.Delete. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. An operation can be
                global, regional or zonal, and is used to manage
                asynchronous API requests.
        """
        return self._call_rest(
            request,
            http_options=[{
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}',
            }],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ("project", "project"),
                ("region", "region"),
                ("url_map", "urlMap"),
            ],
            request_cls=compute.DeleteRegionUrlMapRequest,
            response_cls=compute.Operation,
            timeout=timeout,
            metadata=metadata,
        )

    def _get(self,
            request: compute.GetRegionUrlMapRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.UrlMap:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetRegionUrlMapRequest):
                The request object. A request message for
                RegionUrlMaps.Get. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.UrlMap:
                Represents a URL Map resource, which maps host names and
                URL paths to either a backend service or a backend
                bucket. regionUrlMaps are used by internal HTTP(S) load
                balancers (loadBalancingScheme of INTERNAL_MANAGED).
        """
        return self._call_rest(
            request,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}',
            }],
            required_fields=[
                ("project", "project"),
                ("region", "region"),
                ("url_map", "urlMap"),
            ],
            request_cls=compute.GetRegionUrlMapRequest,
            response_cls=compute.UrlMap,
            timeout=timeout,
            metadata=metadata,
        )

    def _insert(self,
            request: compute.InsertRegionUrlMapRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertRegionUrlMapRequest):
                The request object. A request message for
                RegionUrlMaps.Insert. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. An operation can be
                global, regional or zonal, and is used to manage
                asynchronous API requests.
        """
        return self._call_rest(
            request,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/regions/{region}/urlMaps',
                'body': 'url_map_resource',
            }],
            required_fields=[
                ("project", "project"),
                ("region", "region"),
            ],
            request_cls=compute.InsertRegionUrlMapRequest,
            response_cls=compute.Operation,
            body_cls=compute.UrlMap,
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListRegionUrlMapsRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.UrlMapList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListRegionUrlMapsRequest):
                The request object. A request message for
                RegionUrlMaps.List. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.UrlMapList:
                Contains a list of UrlMap resources.
        """
        return self._call_rest(
            request,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/regions/{region}/urlMaps',
            }],
            required_fields=[
                ("project", "project"),
                ("region", "region"),
            ],
            request_cls=compute.ListRegionUrlMapsRequest,
            response_cls=compute.UrlMapList,
            timeout=timeout,
            metadata=metadata,
        )

    def _patch(self,
            request: compute.PatchRegionUrlMapRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the patch method over HTTP.

        Args:
            request (~.compute.PatchRegionUrlMapRequest):
                The request object. A request message for
                RegionUrlMaps.Patch. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. An operation can be
                global, regional or zonal, and is used to manage
                asynchronous API requests.
        """
        return self._call_rest(
            request,
            http_options=[{
                'method': 'patch',
                'uri': '/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}',
                'body': 'url_map_resource',
            }],
            required_fields=[
                ("project", "project"),
                ("region", "region"),
                ("url_map", "urlMap"),
            ],
            request_cls=compute.PatchRegionUrlMapRequest,
            response_cls=compute.Operation,
            body_cls=compute.UrlMap,
            timeout=timeout,
            metadata=metadata,
        )

    def _update(self,
            request: compute.UpdateRegionUrlMapRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the update method over HTTP.

        Args:
            request (~.compute.UpdateRegionUrlMapRequest):
                The request object. A request message for
                RegionUrlMaps.Update. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. An operation can be
                global, regional or zonal, and is used to manage
                asynchronous API requests.
        """
        return self._call_rest(
            request,
            http_options=[{
                'method': 'put',
                'uri': '/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}',
                'body': 'url_map_resource',
            }],
            required_fields=[
                ("project", "project"),
                ("region", "region"),
                ("url_map", "urlMap"),
            ],
            request_cls=compute.UpdateRegionUrlMapRequest,
            response_cls=compute.Operation,
            body_cls=compute.UrlMap,
            timeout=timeout,
            metadata=metadata,
        )

    def _validate(self,
            request: compute.ValidateRegionUrlMapRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.UrlMapsValidateResponse:
        r"""Call the validate method over HTTP.

        Args:
            request (~.compute.ValidateRegionUrlMapRequest):
                The request object. A request message for
                RegionUrlMaps.Validate. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.UrlMapsValidateResponse:
                The validation outcome for the supplied URL map.
        """
        return self._call_rest(
            request,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}/validate',
                'body': 'region_url_maps_validate_request_resource',
            }],
            required_fields=[
                ("project", "project"),
                ("region", "region"),
                ("url_map", "urlMap"),
            ],
            request_cls=compute.ValidateRegionUrlMapRequest,
            response_cls=compute.UrlMapsValidateResponse,
            body_cls=compute.RegionUrlMapsValidateRequest,
            timeout=timeout,
            metadata=metadata,
        )

    @property
    def delete(self) -> Callable[
            [compute.DeleteRegionUrlMapRequest],
            compute.Operation]:
        return self._delete

    @property
    def get(self) -> Callable[
            [compute.GetRegionUrlMapRequest],
            compute.UrlMap]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertRegionUrlMapRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListRegionUrlMapsRequest],
            compute.UrlMapList]:
        return self._list

    @property
    def patch(self) -> Callable[
            [compute.PatchRegionUrlMapRequest],
            compute.Operation]:
        return self._patch

    @property
    def update(self) -> Callable[
            [compute.UpdateRegionUrlMapRequest],
            compute.Operation]:
        return self._update

    @property
    def validate(self) -> Callable[
            [compute.ValidateRegionUrlMapRequest],
            compute.UrlMapsValidateResponse]:
        return self._validate

    def close(self):
        """Release the underlying authorized HTTP session."""
        self._session.close()


__all__ = (
    'RegionUrlMapsRestTransport',
)
a/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/__init__.py new file mode 100644 index 000000000..454e5827d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RegionsClient + +__all__ = ( + 'RegionsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/client.py new file mode 100644 index 000000000..7ea64d634 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/client.py @@ -0,0 +1,510 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.regions import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RegionsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RegionsRestTransport + + +class RegionsClientMeta(type): + """Metaclass for the Regions client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RegionsTransport]] + _transport_registry["rest"] = RegionsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RegionsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). 
+ return next(iter(cls._transport_registry.values())) + + +class RegionsClient(metaclass=RegionsClientMeta): + """The Regions API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. 
+ kwargs: Additional arguments to pass to the constructor. + + Returns: + RegionsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RegionsTransport: + """Returns the transport used by the client instance. + + Returns: + RegionsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return 
"projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RegionsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the regions client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RegionsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RegionsTransport): + # transport is a RegionsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def get(self, + request: Union[compute.GetRegionRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Region: + r"""Returns the specified Region resource. Gets a list of + available regions by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRegionRequest, dict]): + The request object. A request message for Regions.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region resource to + return. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Region: + Represents a Region resource. A + region is a geographical area where a + resource is located. For more + information, read Regions and Zones. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRegionRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRegionRequest): + request = compute.GetRegionRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRegionsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of region resources available to + the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListRegionsRequest, dict]): + The request object. A request message for Regions.List. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.regions.pagers.ListPager: + Contains a list of region resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRegionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRegionsRequest): + request = compute.ListRegionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RegionsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/pagers.py new file mode 100644 index 000000000..2984ab6d0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RegionList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.RegionList` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RegionList], + request: compute.ListRegionsRequest, + response: compute.RegionList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRegionsRequest): + The initial request object. + response (google.cloud.compute_v1.types.RegionList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRegionsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RegionList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Region]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/transports/__init__.py new file mode 100644 index 000000000..999b0d031 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/regions/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Fall back to an anonymous client-info when the package is not installed.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class RegionsTransport(abc.ABC):
    """Abstract transport class for Regions.

    Concrete subclasses (e.g. the REST transport) implement the ``get`` and
    ``list`` properties; this base class handles credential resolution and
    method wrapping shared by all transports.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute.readonly',
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are given.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults from the environment.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try
        # to use a self-signed JWT (hasattr guards older google-auth versions).
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods so per-call wrapping is avoided.
        self._wrapped_methods = {
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetRegionRequest],
            Union[
                compute.Region,
                Awaitable[compute.Region]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListRegionsRequest],
            Union[
                compute.RegionList,
                Awaitable[compute.RegionList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'RegionsTransport',
)
from google.cloud.compute_v1.types import compute

from .base import RegionsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class RegionsRestTransport(RegionsTransport):
    """REST backend transport for Regions.

    The Regions API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers, "http" can be
                specified.
        """
        # Run the base constructor.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        #       credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _get(self,
            request: compute.GetRegionRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Region:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetRegionRequest):
                The request object. A request message for Regions.Get.
                See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Region:
                Represents a Region resource. A
                region is a geographical area where a
                resource is located. For more
                information, read Regions and Zones.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/regions/{region}',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            ("project", "project"),
            ("region", "region"),
        ]

        request_kwargs = compute.GetRegionRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.GetRegionRequest.to_json(
            compute.GetRegionRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return compute.Region.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    def _list(self,
            request: compute.ListRegionsRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.RegionList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListRegionsRequest):
                The request object. A request message for Regions.List.
                See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.RegionList:
                Contains a list of region resources.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/regions',
            },
        ]

        required_fields = [
            # (snake_case_name, camel_case_name)
            ("project", "project"),
        ]

        request_kwargs = compute.ListRegionsRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.ListRegionsRequest.to_json(
            compute.ListRegionsRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response.
        return compute.RegionList.from_json(
            response.content,
            ignore_unknown_fields=True
        )

    @property
    def get(self) -> Callable[
            [compute.GetRegionRequest],
            compute.Region]:
        return self._get

    @property
    def list(self) -> Callable[
            [compute.ListRegionsRequest],
            compute.RegionList]:
        return self._list

    def close(self):
        self._session.close()


__all__ = (
    'RegionsRestTransport',
)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ReservationsClient + +__all__ = ( + 'ReservationsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/client.py new file mode 100644 index 000000000..0f7a048a6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/client.py @@ -0,0 +1,1249 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.reservations import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import ReservationsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ReservationsRestTransport + + +class ReservationsClientMeta(type): + """Metaclass for the Reservations client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ReservationsTransport]] + _transport_registry["rest"] = ReservationsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ReservationsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ReservationsClient(metaclass=ReservationsClientMeta):
+    """The Reservations API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ReservationsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ReservationsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ReservationsTransport: + """Returns the transport used by the client instance. + + Returns: + ReservationsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
 string."""
+        return "organizations/{organization}".format(organization=organization, )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str,str]:
+        """Parse a organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(project: str, ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(project=project, )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str,str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(project: str, location: str, ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, ReservationsTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the reservations client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ReservationsTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ReservationsTransport): + # transport is a ReservationsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListReservationsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of reservations. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListReservationsRequest, dict]): + The request object. A request message for + Reservations.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.reservations.pagers.AggregatedListPager: + Contains a list of reservations. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListReservationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListReservationsRequest): + request = compute.AggregatedListReservationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteReservationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + reservation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified reservation. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteReservationRequest, dict]): + The request object. A request message for + Reservations.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + reservation (str): + Name of the reservation to delete. + This corresponds to the ``reservation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, reservation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteReservationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteReservationRequest): + request = compute.DeleteReservationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if reservation is not None: + request.reservation = reservation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetReservationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + reservation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Reservation: + r"""Retrieves information about the specified + reservation. + + Args: + request (Union[google.cloud.compute_v1.types.GetReservationRequest, dict]): + The request object. A request message for + Reservations.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + reservation (str): + Name of the reservation to retrieve. + This corresponds to the ``reservation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Reservation: + Represents a reservation resource. A + reservation ensures that capacity is + held in a specific zone even if the + reserved VMs are not running. For more + information, read Reserving zonal + resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, reservation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetReservationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetReservationRequest): + request = compute.GetReservationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if reservation is not None: + request.reservation = reservation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyReservationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyReservationRequest, dict]): + The request object. A request message for + Reservations.GetIamPolicy. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. 
Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyReservationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetIamPolicyReservationRequest): + request = compute.GetIamPolicyReservationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertReservationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + reservation_resource: compute.Reservation = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a new reservation. For more information, read + Reserving zonal resources. + + Args: + request (Union[google.cloud.compute_v1.types.InsertReservationRequest, dict]): + The request object. A request message for + Reservations.Insert. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + reservation_resource (google.cloud.compute_v1.types.Reservation): + The body resource for this request + This corresponds to the ``reservation_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, reservation_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertReservationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertReservationRequest): + request = compute.InsertReservationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if reservation_resource is not None: + request.reservation_resource = reservation_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListReservationsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""A list of all the reservations that have been + configured for the specified project in specified zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListReservationsRequest, dict]): + The request object. A request message for + Reservations.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.reservations.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListReservationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListReservationsRequest): + request = compute.ListReservationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def resize(self, + request: Union[compute.ResizeReservationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + reservation: str = None, + reservations_resize_request_resource: compute.ReservationsResizeRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Resizes the reservation (applicable to standalone + reservations only). For more information, read Modifying + reservations. + + Args: + request (Union[google.cloud.compute_v1.types.ResizeReservationRequest, dict]): + The request object. A request message for + Reservations.Resize. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + reservation (str): + Name of the reservation to update. + This corresponds to the ``reservation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + reservations_resize_request_resource (google.cloud.compute_v1.types.ReservationsResizeRequest): + The body resource for this request + This corresponds to the ``reservations_resize_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, reservation, reservations_resize_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ResizeReservationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ResizeReservationRequest): + request = compute.ResizeReservationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if reservation is not None: + request.reservation = reservation + if reservations_resize_request_resource is not None: + request.reservations_resize_request_resource = reservations_resize_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.resize] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyReservationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + zone_set_policy_request_resource: compute.ZoneSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyReservationRequest, dict]): + The request object. A request message for + Reservations.SetIamPolicy. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + This corresponds to the ``zone_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, zone_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyReservationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetIamPolicyReservationRequest): + request = compute.SetIamPolicyReservationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if zone_set_policy_request_resource is not None: + request.zone_set_policy_request_resource = zone_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsReservationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsReservationRequest, dict]): + The request object. A request message for + Reservations.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + The name of the zone for this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. 
+ + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsReservationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsReservationRequest): + request = compute.TestIamPermissionsReservationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ReservationsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/pagers.py new file mode 100644 index 000000000..ff24d5dbd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ReservationAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ReservationAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ReservationAggregatedList], + request: compute.AggregatedListReservationsRequest, + response: compute.ReservationAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListReservationsRequest): + The initial request object. + response (google.cloud.compute_v1.types.ReservationAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.AggregatedListReservationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ReservationAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.ReservationsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.ReservationsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ReservationList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ReservationList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ReservationList], + request: compute.ListReservationsRequest, + response: compute.ReservationList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListReservationsRequest): + The initial request object. 
+ response (google.cloud.compute_v1.types.ReservationList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListReservationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ReservationList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Reservation]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/__init__.py new file mode 100644 index 000000000..ab03d5c7f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ReservationsTransport +from .rest import ReservationsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ReservationsTransport]] +_transport_registry['rest'] = ReservationsRestTransport + +__all__ = ( + 'ReservationsTransport', + 'ReservationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/base.py new file mode 100644 index 000000000..0c029bcb8 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/base.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ReservationsTransport(abc.ABC): + """Abstract transport class for Reservations.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.resize: gapic_v1.method.wrap_method( + self.resize, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListReservationsRequest], + Union[ + compute.ReservationAggregatedList, + Awaitable[compute.ReservationAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteReservationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetReservationRequest], + Union[ + compute.Reservation, + Awaitable[compute.Reservation] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyReservationRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertReservationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListReservationsRequest], + Union[ + compute.ReservationList, + Awaitable[compute.ReservationList] + ]]: + raise NotImplementedError() + + @property + def resize(self) -> Callable[ + [compute.ResizeReservationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyReservationRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsReservationRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ReservationsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/rest.py 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/rest.py new file mode 100644 index 000000000..8de8ab52f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/reservations/transports/rest.py @@ -0,0 +1,1159 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import ReservationsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ReservationsRestTransport(ReservationsTransport): + """REST backend transport for Reservations. + + The Reservations API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListReservationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ReservationAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListReservationsRequest): + The request object. A request message for + Reservations.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.ReservationAggregatedList: + Contains a list of reservations. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/reservations', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListReservationsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListReservationsRequest.to_json( + compute.AggregatedListReservationsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ReservationAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteReservationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteReservationRequest): + The request object. A request message for + Reservations.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations/{reservation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "reservation", + "reservation" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteReservationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteReservationRequest.to_json( + compute.DeleteReservationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetReservationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Reservation: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetReservationRequest): + The request object. A request message for + Reservations.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Reservation: + Represents a reservation resource. A + reservation ensures that capacity is + held in a specific zone even if the + reserved VMs are not running. For more + information, read Reserving zonal + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations/{reservation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "reservation", + "reservation" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetReservationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetReservationRequest.to_json( + compute.GetReservationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Reservation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyReservationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyReservationRequest): + The request object. A request message for + Reservations.GetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service
+                accounts, Google groups, and domains (such as G Suite).
+                A ``role`` is a named list of permissions; each ``role``
+                can be an IAM predefined role or a user-created custom
+                role. For some types of Google Cloud resources, a
+                ``binding`` can also specify a ``condition``, which is a
+                logical expression that allows access to a resource only
+                if the expression evaluates to ``true``. A condition can
+                add constraints based on attributes of the request, the
+                resource, or both. To learn which resources support
+                conditions in their IAM policies, see the `IAM
+                documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__.
+                **JSON example:** { "bindings": [ { "role":
+                "roles/resourcemanager.organizationAdmin", "members": [
+                "user:mike@example.com", "group:admins@example.com",
+                "domain:google.com",
+                "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                ] }, { "role":
+                "roles/resourcemanager.organizationViewer", "members": [
+                "user:eve@example.com" ], "condition": { "title":
+                "expirable access", "description": "Does not grant
+                access after Sep 2020", "expression": "request.time <
+                timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
+                "BwWWja0YfJA=", "version": 3 } **YAML example:**
+                bindings: - members: - user:mike@example.com -
+                group:admins@example.com - domain:google.com -
+                serviceAccount:my-project-id@appspot.gserviceaccount.com
+                role: roles/resourcemanager.organizationAdmin - members:
+                - user:eve@example.com role:
+                roles/resourcemanager.organizationViewer condition:
+                title: expirable access description: Does not grant
+                access after Sep 2020 expression: request.time <
+                timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
+                version: 3 For a description of IAM and its features,
+                see the `IAM
+                documentation <https://cloud.google.com/iam/docs/>`__.
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetIamPolicyReservationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyReservationRequest.to_json( + compute.GetIamPolicyReservationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertReservationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertReservationRequest): + The request object. A request message for + Reservations.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations', + 'body': 'reservation_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertReservationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Reservation.to_json( + compute.Reservation( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertReservationRequest.to_json( + compute.InsertReservationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListReservationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ReservationList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListReservationsRequest): + The request object. A request message for + Reservations.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ReservationList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListReservationsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListReservationsRequest.to_json( + compute.ListReservationsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ReservationList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _resize(self, + request: compute.ResizeReservationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the resize method over HTTP. + + Args: + request (~.compute.ResizeReservationRequest): + The request object. A request message for + Reservations.Resize. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations/{reservation}/resize', + 'body': 'reservations_resize_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "reservation", + "reservation" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ResizeReservationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ReservationsResizeRequest.to_json( + compute.ReservationsResizeRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ResizeReservationRequest.to_json( + compute.ResizeReservationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyReservationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyReservationRequest): + The request object. A request message for + Reservations.SetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service
+                accounts, Google groups, and domains (such as G Suite).
+                A ``role`` is a named list of permissions; each ``role``
+                can be an IAM predefined role or a user-created custom
+                role. For some types of Google Cloud resources, a
+                ``binding`` can also specify a ``condition``, which is a
+                logical expression that allows access to a resource only
+                if the expression evaluates to ``true``. A condition can
+                add constraints based on attributes of the request, the
+                resource, or both. To learn which resources support
+                conditions in their IAM policies, see the `IAM
+                documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__.
+                **JSON example:** { "bindings": [ { "role":
+                "roles/resourcemanager.organizationAdmin", "members": [
+                "user:mike@example.com", "group:admins@example.com",
+                "domain:google.com",
+                "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                ] }, { "role":
+                "roles/resourcemanager.organizationViewer", "members": [
+                "user:eve@example.com" ], "condition": { "title":
+                "expirable access", "description": "Does not grant
+                access after Sep 2020", "expression": "request.time <
+                timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag":
+                "BwWWja0YfJA=", "version": 3 } **YAML example:**
+                bindings: - members: - user:mike@example.com -
+                group:admins@example.com - domain:google.com -
+                serviceAccount:my-project-id@appspot.gserviceaccount.com
+                role: roles/resourcemanager.organizationAdmin - members:
+                - user:eve@example.com role:
+                roles/resourcemanager.organizationViewer condition:
+                title: expirable access description: Does not grant
+                access after Sep 2020 expression: request.time <
+                timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA=
+                version: 3 For a description of IAM and its features,
+                see the `IAM
+                documentation <https://cloud.google.com/iam/docs/>`__.
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations/{resource}/setIamPolicy', + 'body': 'zone_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.SetIamPolicyReservationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ZoneSetPolicyRequest.to_json( + compute.ZoneSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyReservationRequest.to_json( + compute.SetIamPolicyReservationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsReservationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsReservationRequest): + The request object. A request message for + Reservations.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/reservations/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.TestIamPermissionsReservationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsReservationRequest.to_json( + 
compute.TestIamPermissionsReservationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListReservationsRequest], + compute.ReservationAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteReservationRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetReservationRequest], + compute.Reservation]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyReservationRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertReservationRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListReservationsRequest], + compute.ReservationList]: + return self._list + @ property + def resize(self) -> Callable[ + [compute.ResizeReservationRequest], + compute.Operation]: + return self._resize + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyReservationRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsReservationRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'ReservationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/__init__.py new file mode 100644 index 000000000..a9ed88e28 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed 
under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ResourcePoliciesClient + +__all__ = ( + 'ResourcePoliciesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/client.py new file mode 100644 index 000000000..4b55de5ff --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/client.py @@ -0,0 +1,1148 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.resource_policies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import ResourcePoliciesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ResourcePoliciesRestTransport + + +class ResourcePoliciesClientMeta(type): + """Metaclass for the ResourcePolicies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ResourcePoliciesTransport]] + _transport_registry["rest"] = ResourcePoliciesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ResourcePoliciesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+            if label:
+                return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class ResourcePoliciesClient(metaclass=ResourcePoliciesClientMeta):
+    """The ResourcePolicies API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            ResourcePoliciesClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ResourcePoliciesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ResourcePoliciesTransport: + """Returns the transport used by the client instance. + + Returns: + ResourcePoliciesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ResourcePoliciesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the resource policies client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ResourcePoliciesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ResourcePoliciesTransport): + # transport is a ResourcePoliciesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListResourcePoliciesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of resource policies. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListResourcePoliciesRequest, dict]): + The request object. A request message for + ResourcePolicies.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.resource_policies.pagers.AggregatedListPager: + Contains a list of resourcePolicies. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListResourcePoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListResourcePoliciesRequest): + request = compute.AggregatedListResourcePoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteResourcePolicyRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified resource policy. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy (str): + Name of the resource policy to + delete. + + This corresponds to the ``resource_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteResourcePolicyRequest): + request = compute.DeleteResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource_policy is not None: + request.resource_policy = resource_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetResourcePolicyRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.ResourcePolicy: + r"""Retrieves all information of the specified resource + policy. 
+ + Args: + request (Union[google.cloud.compute_v1.types.GetResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy (str): + Name of the resource policy to + retrieve. + + This corresponds to the ``resource_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.ResourcePolicy: + Represents a Resource Policy + resource. You can use resource policies + to schedule actions for some Compute + Engine resources. For example, you can + use them to schedule persistent disk + snapshots. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetResourcePolicyRequest): + request = compute.GetResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource_policy is not None: + request.resource_policy = resource_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyResourcePolicyRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.GetIamPolicy. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicyResourcePolicyRequest): + request = compute.GetIamPolicyResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertResourcePolicyRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource_policy_resource: compute.ResourcePolicy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a new resource policy. + + Args: + request (Union[google.cloud.compute_v1.types.InsertResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + This corresponds to the ``resource_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource_policy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertResourcePolicyRequest): + request = compute.InsertResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if resource_policy_resource is not None: + request.resource_policy_resource = resource_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListResourcePoliciesRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""A list all the resource policies that have been + configured for the specified project in specified + region. + + Args: + request (Union[google.cloud.compute_v1.types.ListResourcePoliciesRequest, dict]): + The request object. A request message for + ResourcePolicies.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.resource_policies.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListResourcePoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListResourcePoliciesRequest): + request = compute.ListResourcePoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyResourcePolicyRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_policy_request_resource: compute.RegionSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. 
+ + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyResourcePolicyRequest, dict]): + The request object. A request message for + ResourcePolicies.SetIamPolicy. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + This corresponds to the ``region_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. 
A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, resource, region_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetIamPolicyResourcePolicyRequest): + request = compute.SetIamPolicyResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_policy_request_resource is not None: + request.region_set_policy_request_resource = region_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsResourcePolicyRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsResourcePolicyRequest, dict]): + The request object. 
A request message for + ResourcePolicies.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsResourcePolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.TestIamPermissionsResourcePolicyRequest): + request = compute.TestIamPermissionsResourcePolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ResourcePoliciesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/pagers.py new file mode 100644 index 000000000..ace644a96 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ResourcePolicyAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.ResourcePolicyAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ResourcePolicyAggregatedList], + request: compute.AggregatedListResourcePoliciesRequest, + response: compute.ResourcePolicyAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListResourcePoliciesRequest): + The initial request object. + response (google.cloud.compute_v1.types.ResourcePolicyAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListResourcePoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ResourcePolicyAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.ResourcePoliciesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.ResourcePoliciesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ResourcePolicyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ResourcePolicyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ResourcePolicyList], + request: compute.ListResourcePoliciesRequest, + response: compute.ResourcePolicyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListResourcePoliciesRequest): + The initial request object. + response (google.cloud.compute_v1.types.ResourcePolicyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListResourcePoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ResourcePolicyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.ResourcePolicy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/__init__.py new file mode 100644 index 000000000..94122fe40 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ResourcePoliciesTransport +from .rest import ResourcePoliciesRestTransport + + +# Compile a registry of transports. 
+# Registry mapping transport names to their classes; 'rest' is the only
+# transport this generated Compute client supports (no gRPC variant).
+_transport_registry = OrderedDict()  # type: Dict[str, Type[ResourcePoliciesTransport]]
+_transport_registry['rest'] = ResourcePoliciesRestTransport
+
+# Names exported by `from ...transports import *`.
+__all__ = (
+    'ResourcePoliciesTransport',
+    'ResourcePoliciesRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/base.py
new file mode 100644
index 000000000..35157f8d6
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/base.py
@@ -0,0 +1,245 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ResourcePoliciesTransport(abc.ABC): + """Abstract transport class for ResourcePolicies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListResourcePoliciesRequest], + Union[ + compute.ResourcePolicyAggregatedList, + Awaitable[compute.ResourcePolicyAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteResourcePolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetResourcePolicyRequest], + Union[ + compute.ResourcePolicy, + Awaitable[compute.ResourcePolicy] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyResourcePolicyRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertResourcePolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListResourcePoliciesRequest], + Union[ + compute.ResourcePolicyList, + Awaitable[compute.ResourcePolicyList] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyResourcePolicyRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsResourcePolicyRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ResourcePoliciesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/rest.py new file mode 100644 index 000000000..00ff90bca --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/resource_policies/transports/rest.py @@ -0,0 +1,1037 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import ResourcePoliciesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ResourcePoliciesRestTransport(ResourcePoliciesTransport): + """REST backend transport for ResourcePolicies. 
+ + The ResourcePolicies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListResourcePoliciesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ResourcePolicyAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListResourcePoliciesRequest): + The request object. A request message for + ResourcePolicies.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ResourcePolicyAggregatedList: + Contains a list of resourcePolicies. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/resourcePolicies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListResourcePoliciesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListResourcePoliciesRequest.to_json( + compute.AggregatedListResourcePoliciesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ResourcePolicyAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteResourcePolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource_policy", + "resourcePolicy" + ), + ] + + request_kwargs = compute.DeleteResourcePolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteResourcePolicyRequest.to_json( + compute.DeleteResourcePolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetResourcePolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ResourcePolicy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ResourcePolicy: + Represents a Resource Policy + resource. You can use resource policies + to schedule actions for some Compute + Engine resources. For example, you can + use them to schedule persistent disk + snapshots. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource_policy", + "resourcePolicy" + ), + ] + + request_kwargs = compute.GetResourcePolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetResourcePolicyRequest.to_json( + compute.GetResourcePolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ResourcePolicy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyResourcePolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.GetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. 
A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyResourcePolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyResourcePolicyRequest.to_json( + compute.GetIamPolicyResourcePolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertResourcePolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/resourcePolicies', + 'body': 'resource_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertResourcePolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ResourcePolicy.to_json( + compute.ResourcePolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertResourcePolicyRequest.to_json( + compute.InsertResourcePolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListResourcePoliciesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ResourcePolicyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListResourcePoliciesRequest): + The request object. A request message for + ResourcePolicies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ResourcePolicyList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/resourcePolicies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListResourcePoliciesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListResourcePoliciesRequest.to_json( + compute.ListResourcePoliciesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ResourcePolicyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyResourcePolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.SetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy', + 'body': 'region_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyResourcePolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetPolicyRequest.to_json( + compute.RegionSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyResourcePolicyRequest.to_json( + compute.SetIamPolicyResourcePolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsResourcePolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsResourcePolicyRequest): + The request object. A request message for + ResourcePolicies.TestIamPermissions. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsResourcePolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsResourcePolicyRequest.to_json( + compute.TestIamPermissionsResourcePolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListResourcePoliciesRequest], + compute.ResourcePolicyAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteResourcePolicyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetResourcePolicyRequest], + compute.ResourcePolicy]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyResourcePolicyRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertResourcePolicyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListResourcePoliciesRequest], + compute.ResourcePolicyList]: + return self._list + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyResourcePolicyRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def 
test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsResourcePolicyRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'ResourcePoliciesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/__init__.py new file mode 100644 index 000000000..51d5ea063 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RoutersClient + +__all__ = ( + 'RoutersClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/client.py new file mode 100644 index 000000000..27078a59b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/client.py @@ -0,0 +1,1275 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.routers import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RoutersTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RoutersRestTransport + + +class RoutersClientMeta(type): + """Metaclass for the Routers client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RoutersTransport]] + _transport_registry["rest"] = RoutersRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RoutersTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class RoutersClient(metaclass=RoutersClientMeta): + """The Routers API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RoutersClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RoutersClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RoutersTransport: + """Returns the transport used by the client instance. + + Returns: + RoutersTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RoutersTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the routers client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RoutersTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RoutersTransport): + # transport is a RoutersTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListRoutersRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of routers. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListRoutersRequest, dict]): + The request object. A request message for + Routers.AggregatedList. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.routers.pagers.AggregatedListPager: + Contains a list of routers. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListRoutersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListRoutersRequest): + request = compute.AggregatedListRoutersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteRouterRequest, dict] = None, + *, + project: str = None, + region: str = None, + router: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified Router resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRouterRequest, dict]): + The request object. A request message for + Routers.Delete. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router (str): + Name of the Router resource to + delete. + + This corresponds to the ``router`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. 
- For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, router]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRouterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRouterRequest): + request = compute.DeleteRouterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if router is not None: + request.router = router + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRouterRequest, dict] = None, + *, + project: str = None, + region: str = None, + router: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Router: + r"""Returns the specified Router resource. Gets a list of + available routers by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRouterRequest, dict]): + The request object. A request message for Routers.Get. 
+ See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router (str): + Name of the Router resource to + return. + + This corresponds to the ``router`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Router: + Represents a Cloud Router resource. + For more information about Cloud Router, + read the Cloud Router overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, router]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRouterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRouterRequest): + request = compute.GetRouterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if router is not None: + request.router = router + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_nat_mapping_info(self, + request: Union[compute.GetNatMappingInfoRoutersRequest, dict] = None, + *, + project: str = None, + region: str = None, + router: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.GetNatMappingInfoPager: + r"""Retrieves runtime Nat mapping information of VM + endpoints. + + Args: + request (Union[google.cloud.compute_v1.types.GetNatMappingInfoRoutersRequest, dict]): + The request object. A request message for + Routers.GetNatMappingInfo. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router (str): + Name of the Router resource to query + for Nat Mapping information of VM + endpoints. + + This corresponds to the ``router`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.routers.pagers.GetNatMappingInfoPager: + Contains a list of + VmEndpointNatMappings. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, router]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetNatMappingInfoRoutersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetNatMappingInfoRoutersRequest): + request = compute.GetNatMappingInfoRoutersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if router is not None: + request.router = router + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_nat_mapping_info] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.GetNatMappingInfoPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get_router_status(self, + request: Union[compute.GetRouterStatusRouterRequest, dict] = None, + *, + project: str = None, + region: str = None, + router: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.RouterStatusResponse: + r"""Retrieves runtime information of the specified + router. + + Args: + request (Union[google.cloud.compute_v1.types.GetRouterStatusRouterRequest, dict]): + The request object. A request message for + Routers.GetRouterStatus. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router (str): + Name of the Router resource to query. + This corresponds to the ``router`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.RouterStatusResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, router]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRouterStatusRouterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRouterStatusRouterRequest): + request = compute.GetRouterStatusRouterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if router is not None: + request.router = router + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_router_status] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRouterRequest, dict] = None, + *, + project: str = None, + region: str = None, + router_resource: compute.Router = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a Router resource in the specified project + and region using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRouterRequest, dict]): + The request object. A request message for + Routers.Insert. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + This corresponds to the ``router_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, router_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRouterRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertRouterRequest): + request = compute.InsertRouterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if router_resource is not None: + request.router_resource = router_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRoutersRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of Router resources available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListRoutersRequest, dict]): + The request object. A request message for Routers.List. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.routers.pagers.ListPager: + Contains a list of Router resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRoutersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRoutersRequest): + request = compute.ListRoutersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def patch(self, + request: Union[compute.PatchRouterRequest, dict] = None, + *, + project: str = None, + region: str = None, + router: str = None, + router_resource: compute.Router = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified Router resource with the data + included in the request. This method supports PATCH + semantics and uses JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchRouterRequest, dict]): + The request object. A request message for Routers.Patch. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router (str): + Name of the Router resource to patch. + This corresponds to the ``router`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + This corresponds to the ``router_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, router, router_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchRouterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchRouterRequest): + request = compute.PatchRouterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if router is not None: + request.router = router + if router_resource is not None: + request.router_resource = router_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def preview(self, + request: Union[compute.PreviewRouterRequest, dict] = None, + *, + project: str = None, + region: str = None, + router: str = None, + router_resource: compute.Router = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.RoutersPreviewResponse: + r"""Preview fields auto-generated during router create + and update operations. Calling this method does NOT + create or update the router. + + Args: + request (Union[google.cloud.compute_v1.types.PreviewRouterRequest, dict]): + The request object. A request message for + Routers.Preview. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router (str): + Name of the Router resource to query. + This corresponds to the ``router`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + This corresponds to the ``router_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.RoutersPreviewResponse: + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, router, router_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PreviewRouterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PreviewRouterRequest): + request = compute.PreviewRouterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if router is not None: + request.router = router + if router_resource is not None: + request.router_resource = router_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.preview] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateRouterRequest, dict] = None, + *, + project: str = None, + region: str = None, + router: str = None, + router_resource: compute.Router = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified Router resource with the data + included in the request. This method conforms to PUT + semantics, which requests that the state of the target + resource be created or replaced with the state defined + by the representation enclosed in the request message + payload. 
+ + Args: + request (Union[google.cloud.compute_v1.types.UpdateRouterRequest, dict]): + The request object. A request message for + Routers.Update. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router (str): + Name of the Router resource to + update. + + This corresponds to the ``router`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + This corresponds to the ``router_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, router, router_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateRouterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateRouterRequest): + request = compute.UpdateRouterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if router is not None: + request.router = router + if router_resource is not None: + request.router_resource = router_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RoutersClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/pagers.py new file mode 100644 index 000000000..b2281d3f1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/pagers.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RouterAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.RouterAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RouterAggregatedList], + request: compute.AggregatedListRoutersRequest, + response: compute.RouterAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListRoutersRequest): + The initial request object. + response (google.cloud.compute_v1.types.RouterAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListRoutersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RouterAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.RoutersScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.RoutersScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class GetNatMappingInfoPager: + """A pager for iterating through ``get_nat_mapping_info`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.VmEndpointNatMappingsList` object, and + provides an ``__iter__`` method to iterate through its + ``result`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``GetNatMappingInfo`` requests and continue to iterate + through the ``result`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.VmEndpointNatMappingsList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.VmEndpointNatMappingsList], + request: compute.GetNatMappingInfoRoutersRequest, + response: compute.VmEndpointNatMappingsList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.GetNatMappingInfoRoutersRequest): + The initial request object. + response (google.cloud.compute_v1.types.VmEndpointNatMappingsList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.GetNatMappingInfoRoutersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.VmEndpointNatMappingsList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.VmEndpointNatMappings]: + for page in self.pages: + yield from page.result + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RouterList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.RouterList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RouterList], + request: compute.ListRoutersRequest, + response: compute.RouterList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRoutersRequest): + The initial request object. + response (google.cloud.compute_v1.types.RouterList): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRoutersRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RouterList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Router]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/__init__.py new file mode 100644 index 000000000..49be652fd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RoutersTransport +from .rest import RoutersRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[RoutersTransport]] +_transport_registry['rest'] = RoutersRestTransport + +__all__ = ( + 'RoutersTransport', + 'RoutersRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/base.py new file mode 100644 index 000000000..406ccf862 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/base.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RoutersTransport(abc.ABC): + """Abstract transport class for Routers.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_nat_mapping_info: gapic_v1.method.wrap_method( + self.get_nat_mapping_info, + default_timeout=None, + client_info=client_info, + ), + self.get_router_status: gapic_v1.method.wrap_method( + self.get_router_status, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.preview: gapic_v1.method.wrap_method( + self.preview, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListRoutersRequest], + Union[ + compute.RouterAggregatedList, + Awaitable[compute.RouterAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRouterRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRouterRequest], + Union[ + compute.Router, + Awaitable[compute.Router] + ]]: + raise NotImplementedError() + + @property + def get_nat_mapping_info(self) -> Callable[ + [compute.GetNatMappingInfoRoutersRequest], + Union[ + compute.VmEndpointNatMappingsList, + Awaitable[compute.VmEndpointNatMappingsList] + ]]: + raise NotImplementedError() + + @property + def get_router_status(self) -> Callable[ + [compute.GetRouterStatusRouterRequest], + Union[ + compute.RouterStatusResponse, + Awaitable[compute.RouterStatusResponse] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRouterRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRoutersRequest], + Union[ + compute.RouterList, + Awaitable[compute.RouterList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchRouterRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def preview(self) -> Callable[ + [compute.PreviewRouterRequest], + Union[ + compute.RoutersPreviewResponse, + Awaitable[compute.RoutersPreviewResponse] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateRouterRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RoutersTransport', +) diff 
--git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/rest.py new file mode 100644 index 000000000..9cd1add0e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routers/transports/rest.py @@ -0,0 +1,1185 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+
+
+from google.cloud.compute_v1.types import compute
+
+from .base import RoutersTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
+
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
+    grpc_version=None,
+    rest_version=requests_version,
+)
+
+class RoutersRestTransport(RoutersTransport):
+    """REST backend transport for Routers.
+
+    The Routers API.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends JSON representations of protocol buffers over HTTP/1.1
+    """
+    def __init__(self, *,
+            host: str = 'compute.googleapis.com',
+            credentials: ga_credentials.Credentials=None,
+            credentials_file: str=None,
+            scopes: Sequence[str]=None,
+            client_cert_source_for_mtls: Callable[[
+                ], Tuple[bytes, bytes]]=None,
+            quota_project_id: Optional[str]=None,
+            client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool]=False,
+            url_scheme: str='https',
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+                certificate to configure mutual TLS HTTP channel. It is ignored
+                if ``channel`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListRoutersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RouterAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListRoutersRequest): + The request object. A request message for + Routers.AggregatedList. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RouterAggregatedList: + Contains a list of routers. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/routers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListRoutersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListRoutersRequest.to_json( + compute.AggregatedListRoutersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RouterAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteRouterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRouterRequest): + The request object. A request message for Routers.Delete. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers/{router}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "router", + "router" + ), + ] + + request_kwargs = compute.DeleteRouterRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRouterRequest.to_json( + compute.DeleteRouterRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRouterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Router: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRouterRequest): + The request object. A request message for Routers.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Router: + Represents a Cloud Router resource. + For more information about Cloud Router, + read the Cloud Router overview. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers/{router}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "router", + "router" + ), + ] + + request_kwargs = compute.GetRouterRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRouterRequest.to_json( + compute.GetRouterRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Router.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_nat_mapping_info(self, + request: compute.GetNatMappingInfoRoutersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VmEndpointNatMappingsList: + r"""Call the get nat mapping info method over HTTP. + + Args: + request (~.compute.GetNatMappingInfoRoutersRequest): + The request object. A request message for + Routers.GetNatMappingInfo. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.VmEndpointNatMappingsList: + Contains a list of + VmEndpointNatMappings. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "router", + "router" + ), + ] + + request_kwargs = compute.GetNatMappingInfoRoutersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetNatMappingInfoRoutersRequest.to_json( + compute.GetNatMappingInfoRoutersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.VmEndpointNatMappingsList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_router_status(self, + request: compute.GetRouterStatusRouterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RouterStatusResponse: + r"""Call the get router status method over HTTP. + + Args: + request (~.compute.GetRouterStatusRouterRequest): + The request object. A request message for + Routers.GetRouterStatus. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RouterStatusResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers/{router}/getRouterStatus', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "router", + "router" + ), + ] + + request_kwargs = compute.GetRouterStatusRouterRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRouterStatusRouterRequest.to_json( + compute.GetRouterStatusRouterRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RouterStatusResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRouterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRouterRequest): + The request object. A request message for Routers.Insert. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers', + 'body': 'router_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertRouterRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Router.to_json( + compute.Router( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRouterRequest.to_json( + compute.InsertRouterRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRoutersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RouterList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRoutersRequest): + The request object. A request message for Routers.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RouterList: + Contains a list of Router resources. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListRoutersRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRoutersRequest.to_json( + compute.ListRoutersRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RouterList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchRouterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchRouterRequest): + The request object. A request message for Routers.Patch. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers/{router}', + 'body': 'router_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "router", + "router" + ), + ] + + request_kwargs = compute.PatchRouterRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Router.to_json( + compute.Router( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchRouterRequest.to_json( + compute.PatchRouterRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _preview(self, + request: compute.PreviewRouterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RoutersPreviewResponse: + r"""Call the preview method over HTTP. + + Args: + request (~.compute.PreviewRouterRequest): + The request object. A request message for + Routers.Preview. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RoutersPreviewResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers/{router}/preview', + 'body': 'router_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "router", + "router" + ), + ] + + request_kwargs = compute.PreviewRouterRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Router.to_json( + compute.Router( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PreviewRouterRequest.to_json( + compute.PreviewRouterRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RoutersPreviewResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateRouterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateRouterRequest): + The request object. A request message for Routers.Update. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/regions/{region}/routers/{router}', + 'body': 'router_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "router", + "router" + ), + ] + + request_kwargs = compute.UpdateRouterRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Router.to_json( + compute.Router( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateRouterRequest.to_json( + compute.UpdateRouterRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListRoutersRequest], + compute.RouterAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteRouterRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRouterRequest], + compute.Router]: + return self._get + @ property + def get_nat_mapping_info(self) -> Callable[ + [compute.GetNatMappingInfoRoutersRequest], + compute.VmEndpointNatMappingsList]: + return self._get_nat_mapping_info + @ property + def get_router_status(self) -> Callable[ + [compute.GetRouterStatusRouterRequest], + compute.RouterStatusResponse]: + return self._get_router_status + @ property + def insert(self) -> Callable[ + [compute.InsertRouterRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRoutersRequest], + compute.RouterList]: + return self._list + @ property + def patch(self) -> Callable[ + 
[compute.PatchRouterRequest], + compute.Operation]: + return self._patch + @ property + def preview(self) -> Callable[ + [compute.PreviewRouterRequest], + compute.RoutersPreviewResponse]: + return self._preview + @ property + def update(self) -> Callable[ + [compute.UpdateRouterRequest], + compute.Operation]: + return self._update + def close(self): + self._session.close() + + +__all__=( + 'RoutersRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/__init__.py new file mode 100644 index 000000000..af8452931 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import RoutesClient + +__all__ = ( + 'RoutesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/client.py new file mode 100644 index 000000000..90707a26c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/client.py @@ -0,0 +1,683 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.routes import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import RoutesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import RoutesRestTransport + + +class RoutesClientMeta(type): + """Metaclass for the Routes client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[RoutesTransport]] + _transport_registry["rest"] = RoutesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[RoutesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class RoutesClient(metaclass=RoutesClientMeta): + """The Routes API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RoutesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + RoutesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> RoutesTransport: + """Returns the transport used by the client instance. + + Returns: + RoutesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, RoutesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the routes client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, RoutesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, RoutesTransport): + # transport is a RoutesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteRouteRequest, dict] = None, + *, + project: str = None, + route: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified Route resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteRouteRequest, dict]): + The request object. A request message for Routes.Delete. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + route (str): + Name of the Route resource to delete. + This corresponds to the ``route`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, route]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteRouteRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteRouteRequest): + request = compute.DeleteRouteRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if route is not None: + request.route = route + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetRouteRequest, dict] = None, + *, + project: str = None, + route: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Route: + r"""Returns the specified Route resource. Gets a list of + available routes by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetRouteRequest, dict]): + The request object. A request message for Routes.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + route (str): + Name of the Route resource to return. + This corresponds to the ``route`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Route: + Represents a Route resource. A route + defines a path from VM instances in the + VPC network to a specific destination. + This destination can be inside or + outside the VPC network. For more + information, read the Routes overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, route]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRouteRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRouteRequest): + request = compute.GetRouteRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if route is not None: + request.route = route + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertRouteRequest, dict] = None, + *, + project: str = None, + route_resource: compute.Route = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a Route resource in the specified project + using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertRouteRequest, dict]): + The request object. A request message for Routes.Insert. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ route_resource (google.cloud.compute_v1.types.Route): + The body resource for this request + This corresponds to the ``route_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, route_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertRouteRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertRouteRequest): + request = compute.InsertRouteRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if route_resource is not None: + request.route_resource = route_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListRoutesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of Route resources available to + the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListRoutesRequest, dict]): + The request object. A request message for Routes.List. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.routes.pagers.ListPager: + Contains a list of Route resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListRoutesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListRoutesRequest): + request = compute.ListRoutesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "RoutesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/pagers.py new file mode 100644 index 000000000..b7d016afa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.RouteList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.RouteList` + attributes are available on the pager. 
If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.RouteList], + request: compute.ListRoutesRequest, + response: compute.RouteList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListRoutesRequest): + The initial request object. + response (google.cloud.compute_v1.types.RouteList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListRoutesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.RouteList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Route]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/__init__.py new file mode 100644 index 000000000..e0096520c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import RoutesTransport +from .rest import RoutesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[RoutesTransport]] +_transport_registry['rest'] = RoutesRestTransport + +__all__ = ( + 'RoutesTransport', + 'RoutesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/base.py new file mode 100644 index 000000000..680f4f46b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class RoutesTransport(abc.ABC): + """Abstract transport class for Routes.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteRouteRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetRouteRequest], + Union[ + compute.Route, + Awaitable[compute.Route] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertRouteRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListRoutesRequest], + Union[ + compute.RouteList, + Awaitable[compute.RouteList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'RoutesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/rest.py new file mode 100644 index 000000000..b2f16d89c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/routes/transports/rest.py @@ -0,0 +1,535 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from 
google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import RoutesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class RoutesRestTransport(RoutesTransport): + """REST backend transport for Routes. + + The Routes API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteRouteRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteRouteRequest): + The request object. A request message for Routes.Delete. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/routes/{route}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "route", + "route" + ), + ] + + request_kwargs = compute.DeleteRouteRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteRouteRequest.to_json( + compute.DeleteRouteRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetRouteRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Route: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetRouteRequest): + The request object. A request message for Routes.Get. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Route: + Represents a Route resource. A route + defines a path from VM instances in the + VPC network to a specific destination. + This destination can be inside or + outside the VPC network. For more + information, read the Routes overview. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/routes/{route}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "route", + "route" + ), + ] + + request_kwargs = compute.GetRouteRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRouteRequest.to_json( + compute.GetRouteRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Route.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertRouteRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertRouteRequest): + The request object. A request message for Routes.Insert. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/routes', + 'body': 'route_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertRouteRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Route.to_json( + compute.Route( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertRouteRequest.to_json( + compute.InsertRouteRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListRoutesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.RouteList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListRoutesRequest): + The request object. A request message for Routes.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.RouteList: + Contains a list of Route resources. 
+ """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/routes', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListRoutesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListRoutesRequest.to_json( + compute.ListRoutesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.RouteList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteRouteRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetRouteRequest], + compute.Route]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertRouteRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListRoutesRequest], + compute.RouteList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'RoutesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/__init__.py new file mode 100644 index 000000000..9f311cc51 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import SecurityPoliciesClient + +__all__ = ( + 'SecurityPoliciesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/client.py new file mode 100644 index 000000000..c2403cfd2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/client.py @@ -0,0 +1,1214 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.security_policies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import SecurityPoliciesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import SecurityPoliciesRestTransport + + +class SecurityPoliciesClientMeta(type): + """Metaclass for the SecurityPolicies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SecurityPoliciesTransport]] + _transport_registry["rest"] = SecurityPoliciesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SecurityPoliciesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SecurityPoliciesClient(metaclass=SecurityPoliciesClientMeta): + """The SecurityPolicies API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SecurityPoliciesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SecurityPoliciesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SecurityPoliciesTransport: + """Returns the transport used by the client instance. + + Returns: + SecurityPoliciesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SecurityPoliciesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the security policies client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SecurityPoliciesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SecurityPoliciesTransport): + # transport is a SecurityPoliciesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_rule(self, + request: Union[compute.AddRuleSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy: str = None, + security_policy_rule_resource: compute.SecurityPolicyRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Inserts a rule into a security policy. + + Args: + request (Union[google.cloud.compute_v1.types.AddRuleSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.AddRule. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy (str): + Name of the security policy to + update. 
+ + This corresponds to the ``security_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy_rule_resource (google.cloud.compute_v1.types.SecurityPolicyRule): + The body resource for this request + This corresponds to the ``security_policy_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, security_policy, security_policy_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddRuleSecurityPolicyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddRuleSecurityPolicyRequest): + request = compute.AddRuleSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy is not None: + request.security_policy = security_policy + if security_policy_rule_resource is not None: + request.security_policy_rule_resource = security_policy_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_rule] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified policy. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy (str): + Name of the security policy to + delete. + + This corresponds to the ``security_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, security_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteSecurityPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteSecurityPolicyRequest): + request = compute.DeleteSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy is not None: + request.security_policy = security_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SecurityPolicy: + r"""List all of the ordered rules present in a single + specified policy. + + Args: + request (Union[google.cloud.compute_v1.types.GetSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy (str): + Name of the security policy to get. + This corresponds to the ``security_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.SecurityPolicy: + Represents a Google Cloud Armor + security policy resource. Only external + backend services that use load balancers + can reference a security policy. For + more information, see Google Cloud Armor + security policy overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, security_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetSecurityPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetSecurityPolicyRequest): + request = compute.GetSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy is not None: + request.security_policy = security_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_rule(self, + request: Union[compute.GetRuleSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SecurityPolicyRule: + r"""Gets a rule at the specified priority. + + Args: + request (Union[google.cloud.compute_v1.types.GetRuleSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.GetRule. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy (str): + Name of the security policy to which + the queried rule belongs. 
+ + This corresponds to the ``security_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.SecurityPolicyRule: + Represents a rule that describes one + or more match conditions along with the + action to be taken when traffic matches + this condition (allow or deny). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, security_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetRuleSecurityPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetRuleSecurityPolicyRequest): + request = compute.GetRuleSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy is not None: + request.security_policy = security_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_rule] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy_resource: compute.SecurityPolicy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a new policy in the specified project using + the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy_resource (google.cloud.compute_v1.types.SecurityPolicy): + The body resource for this request + This corresponds to the ``security_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. 
- For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, security_policy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertSecurityPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertSecurityPolicyRequest): + request = compute.InsertSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy_resource is not None: + request.security_policy_resource = security_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListSecurityPoliciesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""List all the policies that have been configured for + the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListSecurityPoliciesRequest, dict]): + The request object. A request message for + SecurityPolicies.List. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.security_policies.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListSecurityPoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListSecurityPoliciesRequest): + request = compute.ListSecurityPoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_preconfigured_expression_sets(self, + request: Union[compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse: + r"""Gets the current list of preconfigured Web + Application Firewall (WAF) expressions. + + Args: + request (Union[google.cloud.compute_v1.types.ListPreconfiguredExpressionSetsSecurityPoliciesRequest, dict]): + The request object. A request message for + SecurityPolicies.ListPreconfiguredExpressionSets. See + the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.SecurityPoliciesListPreconfiguredExpressionSetsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest): + request = compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_preconfigured_expression_sets] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy: str = None, + security_policy_resource: compute.SecurityPolicy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified policy with the data included + in the request. This cannot be used to be update the + rules in the policy. Please use the per rule methods + like addRule, patchRule, and removeRule instead. + + Args: + request (Union[google.cloud.compute_v1.types.PatchSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy (str): + Name of the security policy to + update. + + This corresponds to the ``security_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ security_policy_resource (google.cloud.compute_v1.types.SecurityPolicy): + The body resource for this request + This corresponds to the ``security_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, security_policy, security_policy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchSecurityPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.PatchSecurityPolicyRequest): + request = compute.PatchSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy is not None: + request.security_policy = security_policy + if security_policy_resource is not None: + request.security_policy_resource = security_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch_rule(self, + request: Union[compute.PatchRuleSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy: str = None, + security_policy_rule_resource: compute.SecurityPolicyRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches a rule at the specified priority. + + Args: + request (Union[google.cloud.compute_v1.types.PatchRuleSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.PatchRule. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy (str): + Name of the security policy to + update. + + This corresponds to the ``security_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ security_policy_rule_resource (google.cloud.compute_v1.types.SecurityPolicyRule): + The body resource for this request + This corresponds to the ``security_policy_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, security_policy, security_policy_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchRuleSecurityPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.PatchRuleSecurityPolicyRequest): + request = compute.PatchRuleSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy is not None: + request.security_policy = security_policy + if security_policy_rule_resource is not None: + request.security_policy_rule_resource = security_policy_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch_rule] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def remove_rule(self, + request: Union[compute.RemoveRuleSecurityPolicyRequest, dict] = None, + *, + project: str = None, + security_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes a rule at the specified priority. + + Args: + request (Union[google.cloud.compute_v1.types.RemoveRuleSecurityPolicyRequest, dict]): + The request object. A request message for + SecurityPolicies.RemoveRule. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + security_policy (str): + Name of the security policy to + update. + + This corresponds to the ``security_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, security_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemoveRuleSecurityPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemoveRuleSecurityPolicyRequest): + request = compute.RemoveRuleSecurityPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if security_policy is not None: + request.security_policy = security_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.remove_rule] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SecurityPoliciesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/pagers.py new file mode 100644 index 000000000..fcae2e99f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator
+
+from google.cloud.compute_v1.types import compute
+
+
+class ListPager:
+    """A pager for iterating through ``list`` requests.
+
+    This class thinly wraps an initial
+    :class:`google.cloud.compute_v1.types.SecurityPolicyList` object, and
+    provides an ``__iter__`` method to iterate through its
+    ``items`` field.
+
+    If there are more pages, the ``__iter__`` method will make additional
+    ``List`` requests and continue to iterate
+    through the ``items`` field on the
+    corresponding responses.
+
+    All the usual :class:`google.cloud.compute_v1.types.SecurityPolicyList`
+    attributes are available on the pager. If multiple requests are made, only
+    the most recent response is retained, and thus used for attribute lookup.
+    """
+    def __init__(self,
+            method: Callable[..., compute.SecurityPolicyList],
+            request: compute.ListSecurityPoliciesRequest,
+            response: compute.SecurityPolicyList,
+            *,
+            metadata: Sequence[Tuple[str, str]] = ()):
+        """Instantiate the pager.
+
+        Args:
+            method (Callable): The method that was originally called, and
+                which instantiated this pager.
+            request (google.cloud.compute_v1.types.ListSecurityPoliciesRequest):
+                The initial request object.
+            response (google.cloud.compute_v1.types.SecurityPolicyList):
+                The initial response object.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        self._method = method
+        # Coerce into the request type so page_token can be assigned below.
+        self._request = compute.ListSecurityPoliciesRequest(request)
+        self._response = response
+        self._metadata = metadata
+
+    def __getattr__(self, name: str) -> Any:
+        # Delegate attribute lookup to the most recent response.
+        return getattr(self._response, name)
+
+    @property
+    def pages(self) -> Iterator[compute.SecurityPolicyList]:
+        # Yield the current page, then keep fetching while the service
+        # reports a next_page_token.
+        yield self._response
+        while self._response.next_page_token:
+            self._request.page_token = self._response.next_page_token
+            self._response = self._method(self._request, metadata=self._metadata)
+            yield self._response
+
+    def __iter__(self) -> Iterator[compute.SecurityPolicy]:
+        for page in self.pages:
+            yield from page.items
+
+    def __repr__(self) -> str:
+        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/__init__.py
new file mode 100644
index 000000000..d4dbd8bd4
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/__init__.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import SecurityPoliciesTransport
+from .rest import SecurityPoliciesRestTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict()  # type: Dict[str, Type[SecurityPoliciesTransport]]
+_transport_registry['rest'] = SecurityPoliciesRestTransport
+
+__all__ = (
+    'SecurityPoliciesTransport',
+    'SecurityPoliciesRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/base.py
new file mode 100644
index 000000000..5861ab947
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/base.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import abc
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import pkg_resources
+
+import google.auth  # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.compute_v1.types import compute
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            'google-cloud-compute',
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class SecurityPoliciesTransport(abc.ABC):
+    """Abstract transport class for SecurityPolicies."""
+
+    AUTH_SCOPES = (
+        'https://www.googleapis.com/auth/compute',
+        'https://www.googleapis.com/auth/cloud-platform',
+    )
+
+    DEFAULT_HOST: str = 'compute.googleapis.com'
+    def __init__(
+            self, *,
+            host: str = DEFAULT_HOST,
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {
+            self.add_rule: gapic_v1.method.wrap_method(
+                self.add_rule,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.delete: gapic_v1.method.wrap_method(
+                self.delete,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.get: gapic_v1.method.wrap_method(
+                self.get,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.get_rule: gapic_v1.method.wrap_method(
+                self.get_rule,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.insert: gapic_v1.method.wrap_method(
+                self.insert,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.list: gapic_v1.method.wrap_method(
+                self.list,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.list_preconfigured_expression_sets: gapic_v1.method.wrap_method(
+                self.list_preconfigured_expression_sets,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.patch: gapic_v1.method.wrap_method(
+                self.patch,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.patch_rule: gapic_v1.method.wrap_method(
+                self.patch_rule,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.remove_rule: gapic_v1.method.wrap_method(
+                self.remove_rule,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+        }
+
+    def close(self):
+        """Closes resources associated with the transport.
+
+        .. warning::
+             Only call this method if the transport is NOT shared
+             with other clients - this may cause errors in other clients!
+        """
+        raise NotImplementedError()
+
+    @property
+    def add_rule(self) -> Callable[
+            [compute.AddRuleSecurityPolicyRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def delete(self) -> Callable[
+            [compute.DeleteSecurityPolicyRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def get(self) -> Callable[
+            [compute.GetSecurityPolicyRequest],
+            Union[
+                compute.SecurityPolicy,
+                Awaitable[compute.SecurityPolicy]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def get_rule(self) -> Callable[
+            [compute.GetRuleSecurityPolicyRequest],
+            Union[
+                compute.SecurityPolicyRule,
+                Awaitable[compute.SecurityPolicyRule]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def insert(self) -> Callable[
+            [compute.InsertSecurityPolicyRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def list(self) -> Callable[
+            [compute.ListSecurityPoliciesRequest],
+            Union[
+                compute.SecurityPolicyList,
+                Awaitable[compute.SecurityPolicyList]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def list_preconfigured_expression_sets(self) -> Callable[
+            [compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest],
+            Union[
+                compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse,
+                Awaitable[compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def patch(self) -> Callable[
+            [compute.PatchSecurityPolicyRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def patch_rule(self) -> Callable[
+            [compute.PatchRuleSecurityPolicyRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+    @property
+    def remove_rule(self) -> Callable[
+            [compute.RemoveRuleSecurityPolicyRequest],
+            Union[
+                compute.Operation,
+                Awaitable[compute.Operation]
+            ]]:
+        raise NotImplementedError()
+
+
+__all__ = (
+    'SecurityPoliciesTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/rest.py
new file mode 100644
index 000000000..7b68f0630
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/security_policies/transports/rest.py
@@ -0,0 +1,1191 @@
+from google.auth.transport.requests import AuthorizedSession  # type: ignore
+import json  # type: ignore
+import grpc  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.api_core import exceptions as core_exceptions
+from google.api_core import retry as retries
+from google.api_core import rest_helpers
+from google.api_core import path_template
+from google.api_core import gapic_v1
+from requests import __version__ as requests_version
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+import warnings
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import SecurityPoliciesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class SecurityPoliciesRestTransport(SecurityPoliciesTransport): + """REST backend transport for SecurityPolicies. + + The SecurityPolicies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_rule(self, + request: compute.AddRuleSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add rule method over HTTP. + + Args: + request (~.compute.AddRuleSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.AddRule. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/addRule', + 'body': 'security_policy_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "security_policy", + "securityPolicy" + ), + ] + + request_kwargs = compute.AddRuleSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SecurityPolicyRule.to_json( + compute.SecurityPolicyRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddRuleSecurityPolicyRequest.to_json( + compute.AddRuleSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/{security_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "security_policy", + "securityPolicy" + ), + ] + + request_kwargs = compute.DeleteSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteSecurityPolicyRequest.to_json( + compute.DeleteSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SecurityPolicy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SecurityPolicy: + Represents a Google Cloud Armor + security policy resource. Only external + backend services that use load balancers + can reference a security policy. For + more information, see Google Cloud Armor + security policy overview. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/{security_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "security_policy", + "securityPolicy" + ), + ] + + request_kwargs = compute.GetSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetSecurityPolicyRequest.to_json( + compute.GetSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SecurityPolicy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_rule(self, + request: compute.GetRuleSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SecurityPolicyRule: + r"""Call the get rule method over HTTP. + + Args: + request (~.compute.GetRuleSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.GetRule. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SecurityPolicyRule: + Represents a rule that describes one + or more match conditions along with the + action to be taken when traffic matches + this condition (allow or deny). 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/getRule', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "security_policy", + "securityPolicy" + ), + ] + + request_kwargs = compute.GetRuleSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetRuleSecurityPolicyRequest.to_json( + compute.GetRuleSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SecurityPolicyRule.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies', + 'body': 'security_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SecurityPolicy.to_json( + compute.SecurityPolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertSecurityPolicyRequest.to_json( + compute.InsertSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListSecurityPoliciesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SecurityPolicyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListSecurityPoliciesRequest): + The request object. A request message for + SecurityPolicies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SecurityPolicyList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListSecurityPoliciesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListSecurityPoliciesRequest.to_json( + compute.ListSecurityPoliciesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SecurityPolicyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_preconfigured_expression_sets(self, + request: compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse: + r"""Call the list preconfigured + expression sets method over HTTP. + + Args: + request (~.compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest): + The request object. A request message for + SecurityPolicies.ListPreconfiguredExpressionSets. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest.to_json( + compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/{security_policy}', + 'body': 'security_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "security_policy", + "securityPolicy" + ), + ] + + request_kwargs = compute.PatchSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SecurityPolicy.to_json( + compute.SecurityPolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchSecurityPolicyRequest.to_json( + compute.PatchSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch_rule(self, + request: compute.PatchRuleSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch rule method over HTTP. + + Args: + request (~.compute.PatchRuleSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.PatchRule. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/patchRule', + 'body': 'security_policy_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "security_policy", + "securityPolicy" + ), + ] + + request_kwargs = compute.PatchRuleSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SecurityPolicyRule.to_json( + compute.SecurityPolicyRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchRuleSecurityPolicyRequest.to_json( + compute.PatchRuleSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_rule(self, + request: compute.RemoveRuleSecurityPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove rule method over HTTP. + + Args: + request (~.compute.RemoveRuleSecurityPolicyRequest): + The request object. A request message for + SecurityPolicies.RemoveRule. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/removeRule', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "security_policy", + "securityPolicy" + ), + ] + + request_kwargs = compute.RemoveRuleSecurityPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveRuleSecurityPolicyRequest.to_json( + compute.RemoveRuleSecurityPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_rule(self) -> Callable[ + [compute.AddRuleSecurityPolicyRequest], + compute.Operation]: + return self._add_rule + @ property + def delete(self) -> Callable[ + [compute.DeleteSecurityPolicyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetSecurityPolicyRequest], + compute.SecurityPolicy]: + return self._get + @ property + def get_rule(self) -> Callable[ + [compute.GetRuleSecurityPolicyRequest], + compute.SecurityPolicyRule]: + return self._get_rule + @ property + def insert(self) -> Callable[ + [compute.InsertSecurityPolicyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListSecurityPoliciesRequest], + compute.SecurityPolicyList]: + return self._list + @ property + def list_preconfigured_expression_sets(self) -> Callable[ + [compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest], + compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse]: + return self._list_preconfigured_expression_sets + @ property + def patch(self) -> Callable[ + [compute.PatchSecurityPolicyRequest], + compute.Operation]: + return self._patch + @ property + def patch_rule(self) -> Callable[ + [compute.PatchRuleSecurityPolicyRequest], + compute.Operation]: + return self._patch_rule + @ property + def remove_rule(self) -> Callable[ + [compute.RemoveRuleSecurityPolicyRequest], + compute.Operation]: + return self._remove_rule + def close(self): + self._session.close() + + +__all__=( + 'SecurityPoliciesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/__init__.py new file mode 100644 index 
000000000..c68d7b389 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import ServiceAttachmentsClient + +__all__ = ( + 'ServiceAttachmentsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/client.py new file mode 100644 index 000000000..921e32e49 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/client.py @@ -0,0 +1,1270 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.service_attachments import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import ServiceAttachmentsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import ServiceAttachmentsRestTransport + + +class ServiceAttachmentsClientMeta(type): + """Metaclass for the ServiceAttachments client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[ServiceAttachmentsTransport]] + _transport_registry["rest"] = ServiceAttachmentsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[ServiceAttachmentsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ServiceAttachmentsClient(metaclass=ServiceAttachmentsClientMeta): + """The ServiceAttachments API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ServiceAttachmentsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ServiceAttachmentsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ServiceAttachmentsTransport: + """Returns the transport used by the client instance. + + Returns: + ServiceAttachmentsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ServiceAttachmentsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the service attachments client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ServiceAttachmentsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ServiceAttachmentsTransport): + # transport is a ServiceAttachmentsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListServiceAttachmentsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of all ServiceAttachment + resources, regional and global, available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListServiceAttachmentsRequest, dict]): + The request object. A request message for + ServiceAttachments.AggregatedList. See the method + description for details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.service_attachments.pagers.AggregatedListPager: + Contains a list of + ServiceAttachmentsScopedList. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListServiceAttachmentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListServiceAttachmentsRequest): + request = compute.AggregatedListServiceAttachmentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteServiceAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + service_attachment: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified ServiceAttachment in the given + scope + + Args: + request (Union[google.cloud.compute_v1.types.DeleteServiceAttachmentRequest, dict]): + The request object. A request message for + ServiceAttachments.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service_attachment (str): + Name of the ServiceAttachment + resource to delete. + + This corresponds to the ``service_attachment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, service_attachment]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteServiceAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteServiceAttachmentRequest): + request = compute.DeleteServiceAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if service_attachment is not None: + request.service_attachment = service_attachment + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetServiceAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + service_attachment: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.ServiceAttachment: + r"""Returns the specified ServiceAttachment resource in + the given scope. + + Args: + request (Union[google.cloud.compute_v1.types.GetServiceAttachmentRequest, dict]): + The request object. A request message for + ServiceAttachments.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service_attachment (str): + Name of the ServiceAttachment + resource to return. + + This corresponds to the ``service_attachment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.ServiceAttachment: + Represents a ServiceAttachment + resource. A service attachment + represents a service that a producer has + exposed. It encapsulates the load + balancer which fronts the service runs + and a list of NAT IP ranges that the + producers uses to represent the + consumers connecting to the service. + next tag = 20 + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, service_attachment]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetServiceAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetServiceAttachmentRequest): + request = compute.GetServiceAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if service_attachment is not None: + request.service_attachment = service_attachment + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicyServiceAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicyServiceAttachmentRequest, dict]): + The request object. A request message for + ServiceAttachments.GetIamPolicy. See the method + description for details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicyServiceAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicyServiceAttachmentRequest): + request = compute.GetIamPolicyServiceAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertServiceAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + service_attachment_resource: compute.ServiceAttachment = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a ServiceAttachment in the specified project + in the given scope using the parameters that are + included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertServiceAttachmentRequest, dict]): + The request object. A request message for + ServiceAttachments.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ service_attachment_resource (google.cloud.compute_v1.types.ServiceAttachment): + The body resource for this request + This corresponds to the ``service_attachment_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, service_attachment_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertServiceAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertServiceAttachmentRequest): + request = compute.InsertServiceAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if service_attachment_resource is not None: + request.service_attachment_resource = service_attachment_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListServiceAttachmentsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Lists the ServiceAttachments for a project in the + given scope. + + Args: + request (Union[google.cloud.compute_v1.types.ListServiceAttachmentsRequest, dict]): + The request object. A request message for + ServiceAttachments.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region of this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.service_attachments.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListServiceAttachmentsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListServiceAttachmentsRequest): + request = compute.ListServiceAttachmentsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def patch(self, + request: Union[compute.PatchServiceAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + service_attachment: str = None, + service_attachment_resource: compute.ServiceAttachment = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified ServiceAttachment resource with + the data included in the request. This method supports + PATCH semantics and uses JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchServiceAttachmentRequest, dict]): + The request object. A request message for + ServiceAttachments.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The region scoping this request and + should conform to RFC1035. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service_attachment (str): + The resource id of the + ServiceAttachment to patch. It should + conform to RFC1035 resource name or be a + string form on an unsigned long number. + + This corresponds to the ``service_attachment`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + service_attachment_resource (google.cloud.compute_v1.types.ServiceAttachment): + The body resource for this request + This corresponds to the ``service_attachment_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, service_attachment, service_attachment_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchServiceAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchServiceAttachmentRequest): + request = compute.PatchServiceAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if service_attachment is not None: + request.service_attachment = service_attachment + if service_attachment_resource is not None: + request.service_attachment_resource = service_attachment_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicyServiceAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_policy_request_resource: compute.RegionSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicyServiceAttachmentRequest, dict]): + The request object. A request message for + ServiceAttachments.SetIamPolicy. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + This corresponds to the ``region_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, region_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicyServiceAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetIamPolicyServiceAttachmentRequest): + request = compute.SetIamPolicyServiceAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_policy_request_resource is not None: + request.region_set_policy_request_resource = region_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsServiceAttachmentRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsServiceAttachmentRequest, dict]): + The request object. A request message for + ServiceAttachments.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsServiceAttachmentRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsServiceAttachmentRequest): + request = compute.TestIamPermissionsServiceAttachmentRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ServiceAttachmentsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/pagers.py new file mode 100644 index 000000000..3f09559b4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ServiceAttachmentAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ServiceAttachmentAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ServiceAttachmentAggregatedList], + request: compute.AggregatedListServiceAttachmentsRequest, + response: compute.ServiceAttachmentAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListServiceAttachmentsRequest): + The initial request object. + response (google.cloud.compute_v1.types.ServiceAttachmentAggregatedList): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListServiceAttachmentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ServiceAttachmentAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.ServiceAttachmentsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.ServiceAttachmentsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.ServiceAttachmentList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.ServiceAttachmentList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.ServiceAttachmentList], + request: compute.ListServiceAttachmentsRequest, + response: compute.ServiceAttachmentList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListServiceAttachmentsRequest): + The initial request object. + response (google.cloud.compute_v1.types.ServiceAttachmentList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListServiceAttachmentsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.ServiceAttachmentList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.ServiceAttachment]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/__init__.py new file mode 100644 index 000000000..3e1e43425 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ServiceAttachmentsTransport +from .rest import ServiceAttachmentsRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[ServiceAttachmentsTransport]] +_transport_registry['rest'] = ServiceAttachmentsRestTransport + +__all__ = ( + 'ServiceAttachmentsTransport', + 'ServiceAttachmentsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/base.py new file mode 100644 index 000000000..32c8a66fa --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/base.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth                          # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account   # type: ignore

from google.cloud.compute_v1.types import compute

try:
    # Report the installed package version in the user-agent string.  When the
    # code is run from a source tree without being pip-installed, the
    # distribution is absent, so fall back to a default ClientInfo.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class ServiceAttachmentsTransport(abc.ABC):
    """Abstract transport class for ServiceAttachments.

    Concrete transports (e.g. the REST transport) subclass this and implement
    the RPC properties declared below.  This base class is responsible only
    for resolving credentials and pre-wrapping the RPC methods with retry /
    client-info metadata.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are passed.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        # Explicit ``scopes`` take precedence; AUTH_SCOPES is only the default.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )

        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try to
        # use self signed JWT.  The hasattr() guard keeps this working with
        # older google-auth releases that lack with_always_use_jwt_access.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods: each RPC property is wrapped once with
        # retry/timeout defaults and the user-agent metadata from client_info.
        self._wrapped_methods = {
            self.aggregated_list: gapic_v1.method.wrap_method(
                self.aggregated_list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.delete: gapic_v1.method.wrap_method(
                self.delete,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.get_iam_policy: gapic_v1.method.wrap_method(
                self.get_iam_policy,
                default_timeout=None,
                client_info=client_info,
            ),
            self.insert: gapic_v1.method.wrap_method(
                self.insert,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
            self.patch: gapic_v1.method.wrap_method(
                self.patch,
                default_timeout=None,
                client_info=client_info,
            ),
            self.set_iam_policy: gapic_v1.method.wrap_method(
                self.set_iam_policy,
                default_timeout=None,
                client_info=client_info,
            ),
            self.test_iam_permissions: gapic_v1.method.wrap_method(
                self.test_iam_permissions,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # Each property below is an abstract RPC hook: concrete transports return a
    # callable taking the request message and returning either the response or
    # an awaitable of it (sync vs. async transports).

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListServiceAttachmentsRequest],
            Union[
                compute.ServiceAttachmentAggregatedList,
                Awaitable[compute.ServiceAttachmentAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteServiceAttachmentRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetServiceAttachmentRequest],
            Union[
                compute.ServiceAttachment,
                Awaitable[compute.ServiceAttachment]
            ]]:
        raise NotImplementedError()

    @property
    def get_iam_policy(self) -> Callable[
            [compute.GetIamPolicyServiceAttachmentRequest],
            Union[
                compute.Policy,
                Awaitable[compute.Policy]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertServiceAttachmentRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListServiceAttachmentsRequest],
            Union[
                compute.ServiceAttachmentList,
                Awaitable[compute.ServiceAttachmentList]
            ]]:
        raise NotImplementedError()

    @property
    def patch(self) -> Callable[
            [compute.PatchServiceAttachmentRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def set_iam_policy(self) -> Callable[
            [compute.SetIamPolicyServiceAttachmentRequest],
            Union[
                compute.Policy,
                Awaitable[compute.Policy]
            ]]:
        raise NotImplementedError()

    @property
    def test_iam_permissions(self) -> Callable[
            [compute.TestIamPermissionsServiceAttachmentRequest],
            Union[
                compute.TestPermissionsResponse,
                Awaitable[compute.TestPermissionsResponse]
            ]]:
        raise NotImplementedError()


__all__ = (
    'ServiceAttachmentsTransport',
)
diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/rest.py new file mode 100644 index 000000000..31a59a30d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/service_attachments/transports/rest.py @@ -0,0 +1,1164 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import ServiceAttachmentsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ServiceAttachmentsRestTransport(ServiceAttachmentsTransport): + """REST backend transport for ServiceAttachments. + + The ServiceAttachments API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListServiceAttachmentsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ServiceAttachmentAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListServiceAttachmentsRequest): + The request object. A request message for + ServiceAttachments.AggregatedList. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.ServiceAttachmentAggregatedList: + Contains a list of + ServiceAttachmentsScopedList. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/serviceAttachments', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListServiceAttachmentsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListServiceAttachmentsRequest.to_json( + compute.AggregatedListServiceAttachmentsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ServiceAttachmentAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteServiceAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteServiceAttachmentRequest): + The request object. A request message for + ServiceAttachments.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{service_attachment}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "service_attachment", + "serviceAttachment" + ), + ] + + request_kwargs = compute.DeleteServiceAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteServiceAttachmentRequest.to_json( + compute.DeleteServiceAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetServiceAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ServiceAttachment: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetServiceAttachmentRequest): + The request object. A request message for + ServiceAttachments.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ServiceAttachment: + Represents a ServiceAttachment + resource. A service attachment + represents a service that a producer has + exposed. It encapsulates the load + balancer which fronts the service runs + and a list of NAT IP ranges that the + producers uses to represent the + consumers connecting to the service. 
+ next tag = 20 + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{service_attachment}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "service_attachment", + "serviceAttachment" + ), + ] + + request_kwargs = compute.GetServiceAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetServiceAttachmentRequest.to_json( + compute.GetServiceAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ServiceAttachment.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicyServiceAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicyServiceAttachmentRequest): + The request object. A request message for + ServiceAttachments.GetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicyServiceAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicyServiceAttachmentRequest.to_json( + compute.GetIamPolicyServiceAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertServiceAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertServiceAttachmentRequest): + The request object. A request message for + ServiceAttachments.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments', + 'body': 'service_attachment_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertServiceAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ServiceAttachment.to_json( + compute.ServiceAttachment( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertServiceAttachmentRequest.to_json( + compute.InsertServiceAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListServiceAttachmentsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.ServiceAttachmentList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListServiceAttachmentsRequest): + The request object. A request message for + ServiceAttachments.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.ServiceAttachmentList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListServiceAttachmentsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListServiceAttachmentsRequest.to_json( + compute.ListServiceAttachmentsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.ServiceAttachmentList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchServiceAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchServiceAttachmentRequest): + The request object. A request message for + ServiceAttachments.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{service_attachment}', + 'body': 'service_attachment_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "service_attachment", + "serviceAttachment" + ), + ] + + request_kwargs = compute.PatchServiceAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.ServiceAttachment.to_json( + compute.ServiceAttachment( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchServiceAttachmentRequest.to_json( + compute.PatchServiceAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicyServiceAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicyServiceAttachmentRequest): + The request object. A request message for + ServiceAttachments.SetIamPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. 
Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{resource}/setIamPolicy', + 'body': 'region_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicyServiceAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetPolicyRequest.to_json( + compute.RegionSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicyServiceAttachmentRequest.to_json( + compute.SetIamPolicyServiceAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsServiceAttachmentRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsServiceAttachmentRequest): + The request object. A request message for + ServiceAttachments.TestIamPermissions. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsServiceAttachmentRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsServiceAttachmentRequest.to_json( + compute.TestIamPermissionsServiceAttachmentRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListServiceAttachmentsRequest], + compute.ServiceAttachmentAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteServiceAttachmentRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetServiceAttachmentRequest], + compute.ServiceAttachment]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicyServiceAttachmentRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertServiceAttachmentRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListServiceAttachmentsRequest], + compute.ServiceAttachmentList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchServiceAttachmentRequest], + compute.Operation]: + return self._patch + @ property + def 
set_iam_policy(self) -> Callable[ + [compute.SetIamPolicyServiceAttachmentRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsServiceAttachmentRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'ServiceAttachmentsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/__init__.py new file mode 100644 index 000000000..5ba5f557a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import SnapshotsClient + +__all__ = ( + 'SnapshotsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/client.py new file mode 100644 index 000000000..35704ad13 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/client.py @@ -0,0 +1,1024 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.snapshots import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import SnapshotsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import SnapshotsRestTransport + + +class SnapshotsClientMeta(type): + """Metaclass for the Snapshots client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SnapshotsTransport]] + _transport_registry["rest"] = SnapshotsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SnapshotsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SnapshotsClient(metaclass=SnapshotsClientMeta): + """The Snapshots API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. 
+ + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SnapshotsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SnapshotsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SnapshotsTransport: + """Returns the transport used by the client instance. + + Returns: + SnapshotsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SnapshotsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the snapshots client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SnapshotsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SnapshotsTransport): + # transport is a SnapshotsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteSnapshotRequest, dict] = None, + *, + project: str = None, + snapshot: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified Snapshot resource. Keep in mind + that deleting a single snapshot might not necessarily + delete all the data on that snapshot. If any data on the + snapshot that is marked for deletion is needed for + subsequent snapshots, the data will be moved to the next + corresponding snapshot. For more information, see + Deleting snapshots. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteSnapshotRequest, dict]): + The request object. A request message for + Snapshots.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ snapshot (str): + Name of the Snapshot resource to + delete. + + This corresponds to the ``snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, snapshot]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteSnapshotRequest): + request = compute.DeleteSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if snapshot is not None: + request.snapshot = snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetSnapshotRequest, dict] = None, + *, + project: str = None, + snapshot: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Snapshot: + r"""Returns the specified Snapshot resource. Gets a list + of available snapshots by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetSnapshotRequest, dict]): + The request object. A request message for Snapshots.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + snapshot (str): + Name of the Snapshot resource to + return. + + This corresponds to the ``snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Snapshot: + Represents a Persistent Disk Snapshot + resource. 
You can use snapshots to back + up data on a regular interval. For more + information, read Creating persistent + disk snapshots. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, snapshot]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetSnapshotRequest): + request = compute.GetSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if snapshot is not None: + request.snapshot = snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicySnapshotRequest, dict] = None, + *, + project: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicySnapshotRequest, dict]): + The request object. A request message for + Snapshots.GetIamPolicy. See the method description for + details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicySnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicySnapshotRequest): + request = compute.GetIamPolicySnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListSnapshotsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of Snapshot resources contained + within the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListSnapshotsRequest, dict]): + The request object. A request message for + Snapshots.List. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.snapshots.pagers.ListPager: + Contains a list of Snapshot + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListSnapshotsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListSnapshotsRequest): + request = compute.ListSnapshotsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicySnapshotRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_policy_request_resource: compute.GlobalSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicySnapshotRequest, dict]): + The request object. A request message for + Snapshots.SetIamPolicy. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + This corresponds to the ``global_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, global_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicySnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetIamPolicySnapshotRequest): + request = compute.SetIamPolicySnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_policy_request_resource is not None: + request.global_set_policy_request_resource = global_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsSnapshotRequest, dict] = None, + *, + project: str = None, + resource: str = None, + global_set_labels_request_resource: compute.GlobalSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on a snapshot. To learn more about + labels, read the Labeling Resources documentation. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsSnapshotRequest, dict]): + The request object. A request message for + Snapshots.SetLabels. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + This corresponds to the ``global_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, global_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetLabelsSnapshotRequest): + request = compute.SetLabelsSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if global_set_labels_request_resource is not None: + request.global_set_labels_request_resource = global_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsSnapshotRequest, dict] = None, + *, + project: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsSnapshotRequest, dict]): + The request object. A request message for + Snapshots.TestIamPermissions. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsSnapshotRequest): + request = compute.TestIamPermissionsSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SnapshotsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/pagers.py new file mode 100644 index 000000000..6a9880364 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+
+    This class thinly wraps an initial
+    :class:`google.cloud.compute_v1.types.SnapshotList` object, and
+    provides an ``__iter__`` method to iterate through its
+    ``items`` field.
+
+    If there are more pages, the ``__iter__`` method will make additional
+    ``List`` requests and continue to iterate
+    through the ``items`` field on the
+    corresponding responses.
+
+    All the usual :class:`google.cloud.compute_v1.types.SnapshotList`
+    attributes are available on the pager. If multiple requests are made, only
+    the most recent response is retained, and thus used for attribute lookup.
+    """
+    def __init__(self,
+            method: Callable[..., compute.SnapshotList],
+            request: compute.ListSnapshotsRequest,
+            response: compute.SnapshotList,
+            *,
+            metadata: Sequence[Tuple[str, str]] = ()):
+        """Instantiate the pager.
+
+        Args:
+            method (Callable): The method that was originally called, and
+                which instantiated this pager.
+            request (google.cloud.compute_v1.types.ListSnapshotsRequest):
+                The initial request object.
+            response (google.cloud.compute_v1.types.SnapshotList):
+                The initial response object.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+        """
+        self._method = method  # bound client RPC used to fetch subsequent pages
+        self._request = compute.ListSnapshotsRequest(request)  # copy: page_token is mutated while paging
+        self._response = response
+        self._metadata = metadata
+
+    def __getattr__(self, name: str) -> Any:  # delegate unknown attributes to the most recent response
+        return getattr(self._response, name)
+
+    @property
+    def pages(self) -> Iterator[compute.SnapshotList]:  # lazily yields one SnapshotList per page
+        yield self._response
+        while self._response.next_page_token:  # an empty token marks the final page
+            self._request.page_token = self._response.next_page_token
+            self._response = self._method(self._request, metadata=self._metadata)
+            yield self._response
+
+    def __iter__(self) -> Iterator[compute.Snapshot]:  # flattens pages into individual Snapshot items
+        for page in self.pages:
+            yield from page.items
+
+    def __repr__(self) -> str:
+        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/__init__.py
new file mode 100644
index 000000000..5d6d1ebe2
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/__init__.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import SnapshotsTransport
+from .rest import SnapshotsRestTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict()  # type: Dict[str, Type[SnapshotsTransport]]
+_transport_registry['rest'] = SnapshotsRestTransport  # REST is the only transport generated for this API surface
+
+__all__ = (
+    'SnapshotsTransport',
+    'SnapshotsRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/base.py
new file mode 100644
index 000000000..e9084297e
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/base.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import abc
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import pkg_resources
+
+import google.auth  # type: ignore
+import google.api_core
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+from google.cloud.compute_v1.types import compute
+
+try:
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+        gapic_version=pkg_resources.get_distribution(
+            'google-cloud-compute',
+        ).version,
+    )
+except pkg_resources.DistributionNotFound:  # library not pip-installed: fall back to a versionless user agent
+    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+class SnapshotsTransport(abc.ABC):
+    """Abstract transport class for Snapshots."""
+
+    AUTH_SCOPES = (
+        'https://www.googleapis.com/auth/compute',
+        'https://www.googleapis.com/auth/cloud-platform',
+    )
+
+    DEFAULT_HOST: str = 'compute.googleapis.com'
+    def __init__(
+            self, *,
+            host: str = DEFAULT_HOST,
+            credentials: ga_credentials.Credentials = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            **kwargs,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): A list of scopes.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you're developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+        """
+        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+        if ':' not in host:
+            host += ':443'
+        self._host = host
+
+        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}  # default_scopes lets ADC fall back to the API's own scopes
+
+        # Save the scopes.
+        self._scopes = scopes
+
+        # If no credentials are provided, then determine the appropriate
+        # defaults.
+        if credentials and credentials_file:
+            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")
+
+        if credentials_file is not None:
+            credentials, _ = google.auth.load_credentials_from_file(
+                                credentials_file,
+                                **scopes_kwargs,
+                                quota_project_id=quota_project_id
+                            )
+
+        elif credentials is None:
+            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)
+
+        # If the credentials are service account credentials, then always try to use self signed JWT.
+        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
+            credentials = credentials.with_always_use_jwt_access(True)
+
+        # Save the credentials.
+        self._credentials = credentials
+
+    def _prep_wrapped_messages(self, client_info):
+        # Precompute the wrapped methods.
+        self._wrapped_methods = {  # maps each abstract RPC property to a retry/timeout-aware wrapper
+            self.delete: gapic_v1.method.wrap_method(
+                self.delete,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.get: gapic_v1.method.wrap_method(
+                self.get,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.get_iam_policy: gapic_v1.method.wrap_method(
+                self.get_iam_policy,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.list: gapic_v1.method.wrap_method(
+                self.list,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.set_iam_policy: gapic_v1.method.wrap_method(
+                self.set_iam_policy,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.set_labels: gapic_v1.method.wrap_method(
+                self.set_labels,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+            self.test_iam_permissions: gapic_v1.method.wrap_method(
+                self.test_iam_permissions,
+                default_timeout=None,
+                client_info=client_info,
+            ),
+        }
+
+    def close(self):
+        """Closes resources associated with the transport.
+
+        .. warning::
+            Only call this method if the transport is NOT shared
+            with other clients - this may cause errors in other clients!
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteSnapshotRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetSnapshotRequest], + Union[ + compute.Snapshot, + Awaitable[compute.Snapshot] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicySnapshotRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListSnapshotsRequest], + Union[ + compute.SnapshotList, + Awaitable[compute.SnapshotList] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicySnapshotRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [compute.SetLabelsSnapshotRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsSnapshotRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'SnapshotsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/rest.py new file mode 100644 index 000000000..f507d2ed7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/snapshots/transports/rest.py @@ -0,0 +1,922 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials 
# type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import SnapshotsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class SnapshotsRestTransport(SnapshotsTransport): + """REST backend transport for Snapshots. + + The Snapshots API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteSnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteSnapshotRequest): + The request object. A request message for + Snapshots.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/snapshots/{snapshot}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "snapshot", + "snapshot" + ), + ] + + request_kwargs = compute.DeleteSnapshotRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteSnapshotRequest.to_json( + compute.DeleteSnapshotRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetSnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Snapshot: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetSnapshotRequest): + The request object. A request message for Snapshots.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Snapshot: + Represents a Persistent Disk Snapshot + resource. You can use snapshots to back + up data on a regular interval. For more + information, read Creating persistent + disk snapshots. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/snapshots/{snapshot}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "snapshot", + "snapshot" + ), + ] + + request_kwargs = compute.GetSnapshotRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetSnapshotRequest.to_json( + compute.GetSnapshotRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Snapshot.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicySnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicySnapshotRequest): + The request object. A request message for + Snapshots.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/snapshots/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicySnapshotRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicySnapshotRequest.to_json( + compute.GetIamPolicySnapshotRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListSnapshotsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SnapshotList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListSnapshotsRequest): + The request object. A request message for Snapshots.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SnapshotList: + Contains a list of Snapshot + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/snapshots', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListSnapshotsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListSnapshotsRequest.to_json( + compute.ListSnapshotsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SnapshotList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicySnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicySnapshotRequest): + The request object. A request message for + Snapshots.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). 
+ A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/snapshots/{resource}/setIamPolicy', + 'body': 'global_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicySnapshotRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetPolicyRequest.to_json( + compute.GlobalSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicySnapshotRequest.to_json( + compute.SetIamPolicySnapshotRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsSnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsSnapshotRequest): + The request object. A request message for + Snapshots.SetLabels. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/snapshots/{resource}/setLabels', + 'body': 'global_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetLabelsSnapshotRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.GlobalSetLabelsRequest.to_json( + compute.GlobalSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsSnapshotRequest.to_json( + compute.SetLabelsSnapshotRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsSnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsSnapshotRequest): + The request object. A request message for + Snapshots.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/snapshots/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsSnapshotRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsSnapshotRequest.to_json( + compute.TestIamPermissionsSnapshotRequest(transcoded_request['query_params']), + 
including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteSnapshotRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetSnapshotRequest], + compute.Snapshot]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicySnapshotRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def list(self) -> Callable[ + [compute.ListSnapshotsRequest], + compute.SnapshotList]: + return self._list + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicySnapshotRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsSnapshotRequest], + compute.Operation]: + return self._set_labels + @ property + def 
test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsSnapshotRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'SnapshotsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/__init__.py new file mode 100644 index 000000000..5ec80a8c1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import SslCertificatesClient + +__all__ = ( + 'SslCertificatesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/client.py new file mode 100644 index 000000000..b37944c5f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/client.py @@ -0,0 +1,782 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.ssl_certificates import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import SslCertificatesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import SslCertificatesRestTransport + + +class SslCertificatesClientMeta(type): + """Metaclass for the SslCertificates client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SslCertificatesTransport]] + _transport_registry["rest"] = SslCertificatesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SslCertificatesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SslCertificatesClient(metaclass=SslCertificatesClientMeta): + """The SslCertificates API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SslCertificatesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SslCertificatesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SslCertificatesTransport: + """Returns the transport used by the client instance. + + Returns: + SslCertificatesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SslCertificatesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the ssl certificates client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SslCertificatesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SslCertificatesTransport): + # transport is a SslCertificatesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListSslCertificatesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of all SslCertificate resources, + regional and global, available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListSslCertificatesRequest, dict]): + The request object. A request message for + SslCertificates.AggregatedList. See the method + description for details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.ssl_certificates.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListSslCertificatesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListSslCertificatesRequest): + request = compute.AggregatedListSslCertificatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteSslCertificateRequest, dict] = None, + *, + project: str = None, + ssl_certificate: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified SslCertificate resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteSslCertificateRequest, dict]): + The request object. A request message for + SslCertificates.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_certificate (str): + Name of the SslCertificate resource + to delete. + + This corresponds to the ``ssl_certificate`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, ssl_certificate]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteSslCertificateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteSslCertificateRequest): + request = compute.DeleteSslCertificateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if ssl_certificate is not None: + request.ssl_certificate = ssl_certificate + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetSslCertificateRequest, dict] = None, + *, + project: str = None, + ssl_certificate: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SslCertificate: + r"""Returns the specified SslCertificate resource. Gets a + list of available SSL certificates by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetSslCertificateRequest, dict]): + The request object. A request message for + SslCertificates.Get. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_certificate (str): + Name of the SslCertificate resource + to return. + + This corresponds to the ``ssl_certificate`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.SslCertificate: + Represents an SSL Certificate resource. Google Compute + Engine has two SSL Certificate resources: \* + [Global](/compute/docs/reference/rest/v1/sslCertificates) + \* + [Regional](/compute/docs/reference/rest/v1/regionSslCertificates) + The sslCertificates are used by: - external HTTPS load + balancers - SSL proxy load balancers The + regionSslCertificates are used by internal HTTPS load + balancers. Optionally, certificate file contents that + you upload can contain a set of up to five PEM-encoded + certificates. The API call creates an object + (sslCertificate) that holds this data. You can use SSL + keys and certificates to secure connections to a load + balancer. For more information, read Creating and using + SSL certificates, SSL certificates quotas and limits, + and Troubleshooting SSL certificates. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, ssl_certificate]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetSslCertificateRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetSslCertificateRequest): + request = compute.GetSslCertificateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if ssl_certificate is not None: + request.ssl_certificate = ssl_certificate + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertSslCertificateRequest, dict] = None, + *, + project: str = None, + ssl_certificate_resource: compute.SslCertificate = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a SslCertificate resource in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertSslCertificateRequest, dict]): + The request object. A request message for + SslCertificates.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_certificate_resource (google.cloud.compute_v1.types.SslCertificate): + The body resource for this request + This corresponds to the ``ssl_certificate_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, ssl_certificate_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertSslCertificateRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertSslCertificateRequest): + request = compute.InsertSslCertificateRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if ssl_certificate_resource is not None: + request.ssl_certificate_resource = ssl_certificate_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListSslCertificatesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of SslCertificate resources + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListSslCertificatesRequest, dict]): + The request object. A request message for + SslCertificates.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.ssl_certificates.pagers.ListPager: + Contains a list of SslCertificate + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListSslCertificatesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListSslCertificatesRequest): + request = compute.ListSslCertificatesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SslCertificatesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/pagers.py new file mode 100644 index 000000000..fc8141caf --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_certificates/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.SslCertificateAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
class AggregatedListPager:
    """A pager for iterating through ``aggregated_list`` requests.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.SslCertificateAggregatedList`
    response and exposes ``__iter__`` over its ``items`` field, issuing
    further ``AggregatedList`` requests as needed to walk every page.

    Attribute access falls through to the most recent response object.
    """
    def __init__(self,
            method: Callable[..., compute.SslCertificateAggregatedList],
            request: compute.AggregatedListSslCertificatesRequest,
            response: compute.SslCertificateAggregatedList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The originally-called method; reused to fetch
                subsequent pages.
            request (google.cloud.compute_v1.types.AggregatedListSslCertificatesRequest):
                The initial request object (copied so page_token can mutate).
            response (google.cloud.compute_v1.types.SslCertificateAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Metadata sent with each
                page request.
        """
        self._method = method
        self._request = compute.AggregatedListSslCertificatesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.SslCertificateAggregatedList]:
        """Yield each page of results, fetching lazily."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[Tuple[str, compute.SslCertificatesScopedList]]:
        # Aggregated results are keyed by scope, hence (key, scoped-list) pairs.
        for page in self.pages:
            for entry in page.items.items():
                yield entry

    def get(self, key: str) -> Optional[compute.SslCertificatesScopedList]:
        """Look up a scope key on the most recent response page."""
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
class ListPager:
    """A pager for iterating through ``list`` requests.

    Wraps an initial
    :class:`google.cloud.compute_v1.types.SslCertificateList` response and
    exposes ``__iter__`` over its ``items`` field, issuing further ``List``
    requests as needed to walk every page.

    Attribute access falls through to the most recent response object.
    """
    def __init__(self,
            method: Callable[..., compute.SslCertificateList],
            request: compute.ListSslCertificatesRequest,
            response: compute.SslCertificateList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The originally-called method; reused to fetch
                subsequent pages.
            request (google.cloud.compute_v1.types.ListSslCertificatesRequest):
                The initial request object (copied so page_token can mutate).
            response (google.cloud.compute_v1.types.SslCertificateList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Metadata sent with each
                page request.
        """
        self._method = method
        self._request = compute.ListSslCertificatesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.SslCertificateList]:
        """Yield each page of results, fetching lazily."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[compute.SslCertificate]:
        for page in self.pages:
            for item in page.items:
                yield item

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
# Registry mapping transport names to their implementations; only REST is
# available for this service.
_transport_registry: Dict[str, Type[SslCertificatesTransport]] = OrderedDict()
_transport_registry['rest'] = SslCertificatesRestTransport

__all__ = (
    'SslCertificatesTransport',
    'SslCertificatesRestTransport',
)
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Derive the default client info from the installed package version; fall
# back to a bare ClientInfo when the distribution is not installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class SslCertificatesTransport(abc.ABC):
    """Abstract transport class for SslCertificates.

    Concrete subclasses implement the wire protocol; this base handles
    credential resolution, scoping, and per-method wrapping.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Credentials to attach to requests; resolved from the
                environment when ``None``. Mutually exclusive with
                ``credentials_file``.
            credentials_file (Optional[str]): A file loadable with
                :func:`google.auth.load_credentials_from_file`.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): Project used for billing/quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used for the user-agent string.
            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT
                should be used for service account credentials.
        """
        # Default to port 443 (HTTPS) when the host carries no port.
        if ':' not in host:
            host = host + ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        self._scopes = scopes

        # Resolve credentials: explicit object, file, or application default.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # Service-account credentials prefer a self-signed JWT when allowed
        # and supported by the installed google-auth version.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Wrap every RPC once so retries/timeouts and user-agent metadata are
        # applied uniformly; all methods share the same (empty) defaults.
        self._wrapped_methods = {
            method: gapic_v1.method.wrap_method(
                method,
                default_timeout=None,
                client_info=client_info,
            )
            for method in (
                self.aggregated_list,
                self.delete,
                self.get,
                self.insert,
                self.list,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListSslCertificatesRequest],
            Union[
                compute.SslCertificateAggregatedList,
                Awaitable[compute.SslCertificateAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteSslCertificateRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetSslCertificateRequest],
            Union[
                compute.SslCertificate,
                Awaitable[compute.SslCertificate]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertSslCertificateRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListSslCertificatesRequest],
            Union[
                compute.SslCertificateList,
                Awaitable[compute.SslCertificateList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'SslCertificatesTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""REST transport for the SslCertificates service.

Serializes request protos to JSON, sends them over HTTP/1.1 with an
authorized ``requests`` session, and deserializes the JSON responses
back into proto-plus messages.
"""

import json

from typing import Callable, Optional, Sequence, Tuple, Union

from google.auth.transport.requests import AuthorizedSession  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version

from google.cloud.compute_v1.types import compute

from .base import SslCertificatesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class SslCertificatesRestTransport(SslCertificatesTransport):
    """REST backend transport for SslCertificates.

    The SslCertificates API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1.
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers, "http" can be
                specified. NOTE(review): the request URL below currently
                hardcodes "https"; ``url_scheme`` is accepted but not yet
                honored — kept as-is to preserve generated behavior.
        """
        # Run the base constructor.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call(self,
              request,
              *,
              request_cls,
              response_cls,
              http_options,
              required_fields,
              body_cls=None,
              timeout: Optional[float] = None,
              metadata: Sequence[Tuple[str, str]] = (),
              ):
        """Shared REST plumbing for every RPC on this transport.

        Transcodes ``request`` according to ``http_options``, issues the HTTP
        call on the authorized session, raises the mapped
        :class:`~google.api_core.exceptions.GoogleAPICallError` on a >= 400
        status, and deserializes the JSON body into ``response_cls``.

        Args:
            request: The proto-plus request message to send.
            request_cls: The request message class (used for dict/JSON
                conversion).
            response_cls: The proto-plus class of the expected response.
            http_options: ``path_template.transcode`` options (HTTP verb, URI
                template, optional body field name).
            required_fields: ``(snake_case, camelCase)`` pairs of required
                query parameters that must survive serialization.
            body_cls: Proto-plus class of the request body, when the RPC has
                one; ``None`` for body-less RPCs.
            timeout: The timeout for this request, in seconds.
            metadata: Key/value pairs forwarded as HTTP headers.

        Returns:
            An instance of ``response_cls`` parsed from the response body.
        """
        request_kwargs = request_cls.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body, if this RPC carries one.
        body = None
        if body_cls is not None:
            body = body_cls.to_json(
                body_cls(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(request_cls.to_json(
            request_cls(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # Required fields that carry a proto default value are dropped by
        # to_json above; restore them from the pre-serialization params so the
        # server still receives them.
        orig_query_params = transcoded_request['query_params']
        for snake_case_name, camel_case_name in required_fields:
            if (snake_case_name in orig_query_params
                    and camel_case_name not in query_params):
                query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request. NOTE(review): the scheme is hardcoded to https
        # (``url_scheme`` from the constructor is not consulted) — preserved
        # from the generated code.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        kwargs = {}
        if body is not None:
            kwargs['data'] = body
        response = getattr(self._session, method)(
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            **kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return response_cls.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _aggregated_list(self,
            request: compute.AggregatedListSslCertificatesRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.SslCertificateAggregatedList:
        r"""Call the aggregated list method over HTTP.

        Args:
            request (~.compute.AggregatedListSslCertificatesRequest):
                The request object. A request message for
                SslCertificates.AggregatedList. See the method description
                for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE(review): accepted for
                interface compatibility; not currently applied by this REST
                transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.SslCertificateAggregatedList:
                The aggregated (per-scope) list of SSL certificates.
        """
        return self._call(
            request,
            request_cls=compute.AggregatedListSslCertificatesRequest,
            response_cls=compute.SslCertificateAggregatedList,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/aggregated/sslCertificates',
            }],
            required_fields=[('project', 'project')],
            timeout=timeout,
            metadata=metadata,
        )

    def _delete(self,
            request: compute.DeleteSslCertificateRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteSslCertificateRequest):
                The request object. A request message for
                SslCertificates.Delete. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE(review): accepted for
                interface compatibility; not currently applied by this REST
                transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute Engine has
                three Operation resources: Global, Regional and Zonal. You
                can use an operation resource to manage asynchronous API
                requests. For more information, read Handling API responses.
        """
        return self._call(
            request,
            request_cls=compute.DeleteSslCertificateRequest,
            response_cls=compute.Operation,
            http_options=[{
                'method': 'delete',
                'uri': '/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}',
            }],
            required_fields=[
                ('project', 'project'),
                ('ssl_certificate', 'sslCertificate'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _get(self,
            request: compute.GetSslCertificateRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.SslCertificate:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetSslCertificateRequest):
                The request object. A request message for
                SslCertificates.Get. See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE(review): accepted for
                interface compatibility; not currently applied by this REST
                transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.SslCertificate:
                Represents an SSL Certificate resource (global or regional).
                SslCertificates are used by external HTTPS and SSL proxy load
                balancers; regionSslCertificates by internal HTTPS load
                balancers.
        """
        return self._call(
            request,
            request_cls=compute.GetSslCertificateRequest,
            response_cls=compute.SslCertificate,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}',
            }],
            required_fields=[
                ('project', 'project'),
                ('ssl_certificate', 'sslCertificate'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _insert(self,
            request: compute.InsertSslCertificateRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the insert method over HTTP.

        Args:
            request (~.compute.InsertSslCertificateRequest):
                The request object. A request message for
                SslCertificates.Insert. See the method description for
                details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE(review): accepted for
                interface compatibility; not currently applied by this REST
                transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource tracking the insert.
        """
        return self._call(
            request,
            request_cls=compute.InsertSslCertificateRequest,
            response_cls=compute.Operation,
            http_options=[{
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/sslCertificates',
                'body': 'ssl_certificate_resource',
            }],
            required_fields=[('project', 'project')],
            body_cls=compute.SslCertificate,
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListSslCertificatesRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.SslCertificateList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListSslCertificatesRequest):
                The request object. A request message for
                SslCertificates.List. See the method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors,
                if any, should be retried. NOTE(review): accepted for
                interface compatibility; not currently applied by this REST
                transport.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.SslCertificateList:
                Contains a list of SslCertificate resources.
        """
        return self._call(
            request,
            request_cls=compute.ListSslCertificatesRequest,
            response_cls=compute.SslCertificateList,
            http_options=[{
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/sslCertificates',
            }],
            required_fields=[('project', 'project')],
            timeout=timeout,
            metadata=metadata,
        )

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListSslCertificatesRequest],
            compute.SslCertificateAggregatedList]:
        return self._aggregated_list

    @property
    def delete(self) -> Callable[
            [compute.DeleteSslCertificateRequest],
            compute.Operation]:
        return self._delete

    @property
    def get(self) -> Callable[
            [compute.GetSslCertificateRequest],
            compute.SslCertificate]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertSslCertificateRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListSslCertificatesRequest],
            compute.SslCertificateList]:
        return self._list

    def close(self):
        """Release the underlying HTTP session."""
        self._session.close()


__all__ = (
    'SslCertificatesRestTransport',
)
b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import SslPoliciesClient + +__all__ = ( + 'SslPoliciesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/client.py new file mode 100644 index 000000000..9894d9f36 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/client.py @@ -0,0 +1,858 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources

from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.auth.transport import mtls  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth.exceptions import MutualTLSChannelError  # type: ignore
from google.oauth2 import service_account  # type: ignore

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.services.ssl_policies import pagers
from google.cloud.compute_v1.types import compute
from .transports.base import SslPoliciesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import SslPoliciesRestTransport


class SslPoliciesClientMeta(type):
    """Metaclass for the SslPolicies client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    # Registry of available transports, keyed by label; "rest" is the only
    # (and therefore default) entry for this API.
    _transport_registry = OrderedDict()  # type: Dict[str, Type[SslPoliciesTransport]]
    _transport_registry["rest"] = SslPoliciesRestTransport

    def get_transport_class(cls,
            label: Optional[str] = None,
            ) -> Type[SslPoliciesTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))


class SslPoliciesClient(metaclass=SslPoliciesClientMeta):
    """The SslPolicies API."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        if not api_endpoint:
            return api_endpoint

        # Fixed: the generated pattern had its named groups garbled
        # ("(?P[^.]+)" is invalid regex syntax and raises re.error).
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        # ``mtls_part`` deliberately does not shadow the imported ``mtls``
        # module.
        name, mtls_part, sandbox, googledomain = m.groups()
        if mtls_part or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SslPoliciesClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            SslPoliciesClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> SslPoliciesTransport:
        """Returns the transport used by the client instance.

        Returns:
            SslPoliciesTransport: The transport used by the client
            instance.
        """
        return self._transport

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        # Fixed: restored the stripped named group "<billing_account>".
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str, str]:
        """Parse a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, SslPoliciesTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiates the ssl policies client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, SslPoliciesTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        # (The env var is read once and validated before use.)
        cert_env = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        if cert_env not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        use_client_cert = cert_env == "true"

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                if is_mtls:
                    client_cert_source_func = mtls.default_client_cert_source()

        # Figure out which api endpoint to use.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, SslPoliciesTransport):
            # transport is a SslPoliciesTransport instance.
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, ssl_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteSslPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteSslPolicyRequest): + request = compute.DeleteSslPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if ssl_policy is not None: + request.ssl_policy = ssl_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetSslPolicyRequest, dict] = None, + *, + project: str = None, + ssl_policy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SslPolicy: + r"""Lists all of the ordered rules present in a single + specified policy. + + Args: + request (Union[google.cloud.compute_v1.types.GetSslPolicyRequest, dict]): + The request object. A request message for + SslPolicies.Get. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_policy (str): + Name of the SSL policy to update. The + name must be 1-63 characters long, and + comply with RFC1035. + + This corresponds to the ``ssl_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.SslPolicy: + Represents an SSL Policy resource. + Use SSL policies to control the SSL + features, such as versions and cipher + suites, offered by an HTTPS or SSL Proxy + load balancer. For more information, + read SSL Policy Concepts. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, ssl_policy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetSslPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetSslPolicyRequest): + request = compute.GetSslPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if ssl_policy is not None: + request.ssl_policy = ssl_policy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertSslPolicyRequest, dict] = None, + *, + project: str = None, + ssl_policy_resource: compute.SslPolicy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Returns the specified SSL policy resource. Gets a + list of available SSL policies by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertSslPolicyRequest, dict]): + The request object. A request message for + SslPolicies.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ ssl_policy_resource (google.cloud.compute_v1.types.SslPolicy): + The body resource for this request + This corresponds to the ``ssl_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, ssl_policy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertSslPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertSslPolicyRequest): + request = compute.InsertSslPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if ssl_policy_resource is not None: + request.ssl_policy_resource = ssl_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListSslPoliciesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Lists all the SSL policies that have been configured + for the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListSslPoliciesRequest, dict]): + The request object. A request message for + SslPolicies.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.ssl_policies.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListSslPoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListSslPoliciesRequest): + request = compute.ListSslPoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_available_features(self, + request: Union[compute.ListAvailableFeaturesSslPoliciesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.SslPoliciesListAvailableFeaturesResponse: + r"""Lists all features that can be specified in the SSL + policy when using custom profile. + + Args: + request (Union[google.cloud.compute_v1.types.ListAvailableFeaturesSslPoliciesRequest, dict]): + The request object. A request message for + SslPolicies.ListAvailableFeatures. See the method + description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.SslPoliciesListAvailableFeaturesResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListAvailableFeaturesSslPoliciesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListAvailableFeaturesSslPoliciesRequest): + request = compute.ListAvailableFeaturesSslPoliciesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_available_features] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def patch(self, + request: Union[compute.PatchSslPolicyRequest, dict] = None, + *, + project: str = None, + ssl_policy: str = None, + ssl_policy_resource: compute.SslPolicy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified SSL policy with the data + included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.PatchSslPolicyRequest, dict]): + The request object. A request message for + SslPolicies.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_policy (str): + Name of the SSL policy to update. The + name must be 1-63 characters long, and + comply with RFC1035. + + This corresponds to the ``ssl_policy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_policy_resource (google.cloud.compute_v1.types.SslPolicy): + The body resource for this request + This corresponds to the ``ssl_policy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, ssl_policy, ssl_policy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchSslPolicyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchSslPolicyRequest): + request = compute.PatchSslPolicyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if ssl_policy is not None: + request.ssl_policy = ssl_policy + if ssl_policy_resource is not None: + request.ssl_policy_resource = ssl_policy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SslPoliciesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/pagers.py new file mode 100644 index 000000000..2e4543de2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.SslPoliciesList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.SslPoliciesList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.SslPoliciesList], + request: compute.ListSslPoliciesRequest, + response: compute.SslPoliciesList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListSslPoliciesRequest): + The initial request object. + response (google.cloud.compute_v1.types.SslPoliciesList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListSslPoliciesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.SslPoliciesList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.SslPolicy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/__init__.py new file mode 100644 index 000000000..de12afa63 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google 
LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import SslPoliciesTransport +from .rest import SslPoliciesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SslPoliciesTransport]] +_transport_registry['rest'] = SslPoliciesRestTransport + +__all__ = ( + 'SslPoliciesTransport', + 'SslPoliciesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/base.py new file mode 100644 index 000000000..84c7c0a89 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/base.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class SslPoliciesTransport(abc.ABC): + """Abstract transport class for SslPolicies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_available_features: gapic_v1.method.wrap_method( + self.list_available_features, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteSslPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetSslPolicyRequest], + Union[ + compute.SslPolicy, + Awaitable[compute.SslPolicy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertSslPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListSslPoliciesRequest], + Union[ + compute.SslPoliciesList, + Awaitable[compute.SslPoliciesList] + ]]: + raise NotImplementedError() + + @property + def list_available_features(self) -> Callable[ + [compute.ListAvailableFeaturesSslPoliciesRequest], + Union[ + compute.SslPoliciesListAvailableFeaturesResponse, + Awaitable[compute.SslPoliciesListAvailableFeaturesResponse] + ]]: + raise NotImplementedError() + + @property + 
def patch(self) -> Callable[ + [compute.PatchSslPolicyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'SslPoliciesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/rest.py new file mode 100644 index 000000000..4c8b1dea5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/ssl_policies/transports/rest.py @@ -0,0 +1,747 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import SslPoliciesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class SslPoliciesRestTransport(SslPoliciesTransport): + """REST backend transport for SslPolicies. + + The SslPolicies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteSslPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteSslPolicyRequest): + The request object. A request message for + SslPolicies.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "ssl_policy", + "sslPolicy" + ), + ] + + request_kwargs = compute.DeleteSslPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteSslPolicyRequest.to_json( + compute.DeleteSslPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetSslPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SslPolicy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetSslPolicyRequest): + The request object. A request message for + SslPolicies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SslPolicy: + Represents an SSL Policy resource. + Use SSL policies to control the SSL + features, such as versions and cipher + suites, offered by an HTTPS or SSL Proxy + load balancer. For more information, + read SSL Policy Concepts. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "ssl_policy", + "sslPolicy" + ), + ] + + request_kwargs = compute.GetSslPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetSslPolicyRequest.to_json( + compute.GetSslPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SslPolicy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertSslPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertSslPolicyRequest): + The request object. A request message for + SslPolicies.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/sslPolicies', + 'body': 'ssl_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertSslPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SslPolicy.to_json( + compute.SslPolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertSslPolicyRequest.to_json( + compute.InsertSslPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListSslPoliciesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SslPoliciesList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListSslPoliciesRequest): + The request object. A request message for + SslPolicies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SslPoliciesList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/sslPolicies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListSslPoliciesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListSslPoliciesRequest.to_json( + compute.ListSslPoliciesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SslPoliciesList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_available_features(self, + request: compute.ListAvailableFeaturesSslPoliciesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SslPoliciesListAvailableFeaturesResponse: + r"""Call the list available features method over HTTP. + + Args: + request (~.compute.ListAvailableFeaturesSslPoliciesRequest): + The request object. A request message for + SslPolicies.ListAvailableFeatures. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.SslPoliciesListAvailableFeaturesResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/sslPolicies/listAvailableFeatures', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListAvailableFeaturesSslPoliciesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListAvailableFeaturesSslPoliciesRequest.to_json( + compute.ListAvailableFeaturesSslPoliciesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SslPoliciesListAvailableFeaturesResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchSslPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchSslPolicyRequest): + The request object. A request message for + SslPolicies.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}', + 'body': 'ssl_policy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "ssl_policy", + "sslPolicy" + ), + ] + + request_kwargs = compute.PatchSslPolicyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SslPolicy.to_json( + compute.SslPolicy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchSslPolicyRequest.to_json( + compute.PatchSslPolicyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteSslPolicyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetSslPolicyRequest], + compute.SslPolicy]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertSslPolicyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListSslPoliciesRequest], + compute.SslPoliciesList]: + return self._list + @ property + def list_available_features(self) -> Callable[ + [compute.ListAvailableFeaturesSslPoliciesRequest], + compute.SslPoliciesListAvailableFeaturesResponse]: + return self._list_available_features + @ property + def patch(self) -> Callable[ + [compute.PatchSslPolicyRequest], + compute.Operation]: + return self._patch + def close(self): + self._session.close() + + +__all__=( + 'SslPoliciesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/__init__.py new file mode 100644 index 000000000..eef696424 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import SubnetworksClient + +__all__ = ( + 'SubnetworksClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/client.py new file mode 100644 index 000000000..5cfbf104a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/client.py @@ -0,0 +1,1560 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.subnetworks import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import SubnetworksTransport, DEFAULT_CLIENT_INFO +from .transports.rest import SubnetworksRestTransport + + +class SubnetworksClientMeta(type): + """Metaclass for the Subnetworks client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[SubnetworksTransport]] + _transport_registry["rest"] = SubnetworksRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[SubnetworksTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class SubnetworksClient(metaclass=SubnetworksClientMeta): + """The Subnetworks API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SubnetworksClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + SubnetworksClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> SubnetworksTransport: + """Returns the transport used by the client instance. + + Returns: + SubnetworksTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, SubnetworksTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the subnetworks client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, SubnetworksTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, SubnetworksTransport): + # transport is a SubnetworksTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListSubnetworksRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of subnetworks. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListSubnetworksRequest, dict]): + The request object. A request message for + Subnetworks.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.subnetworks.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListSubnetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListSubnetworksRequest): + request = compute.AggregatedListSubnetworksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteSubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + subnetwork: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified subnetwork. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteSubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetwork (str): + Name of the Subnetwork resource to + delete. + + This corresponds to the ``subnetwork`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, subnetwork]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteSubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteSubnetworkRequest): + request = compute.DeleteSubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if subnetwork is not None: + request.subnetwork = subnetwork + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def expand_ip_cidr_range(self, + request: Union[compute.ExpandIpCidrRangeSubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + subnetwork: str = None, + subnetworks_expand_ip_cidr_range_request_resource: compute.SubnetworksExpandIpCidrRangeRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Expands the IP CIDR range of the subnetwork to a + specified value. + + Args: + request (Union[google.cloud.compute_v1.types.ExpandIpCidrRangeSubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.ExpandIpCidrRange. See the method + description for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetwork (str): + Name of the Subnetwork resource to + update. + + This corresponds to the ``subnetwork`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetworks_expand_ip_cidr_range_request_resource (google.cloud.compute_v1.types.SubnetworksExpandIpCidrRangeRequest): + The body resource for this request + This corresponds to the ``subnetworks_expand_ip_cidr_range_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, subnetwork, subnetworks_expand_ip_cidr_range_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ExpandIpCidrRangeSubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ExpandIpCidrRangeSubnetworkRequest): + request = compute.ExpandIpCidrRangeSubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if subnetwork is not None: + request.subnetwork = subnetwork + if subnetworks_expand_ip_cidr_range_request_resource is not None: + request.subnetworks_expand_ip_cidr_range_request_resource = subnetworks_expand_ip_cidr_range_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.expand_ip_cidr_range] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetSubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + subnetwork: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Subnetwork: + r"""Returns the specified subnetwork. Gets a list of + available subnetworks list() request. 
+ + Args: + request (Union[google.cloud.compute_v1.types.GetSubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.Get. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetwork (str): + Name of the Subnetwork resource to + return. + + This corresponds to the ``subnetwork`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Subnetwork: + Represents a Subnetwork resource. A + subnetwork (also known as a subnet) is a + logical partition of a Virtual Private + Cloud network with one primary IP range + and zero or more secondary IP ranges. + For more information, read Virtual + Private Cloud (VPC) Network. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, subnetwork]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetSubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetSubnetworkRequest): + request = compute.GetSubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if subnetwork is not None: + request.subnetwork = subnetwork + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Union[compute.GetIamPolicySubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Gets the access control policy for a resource. May be + empty if no such policy or resource exists. + + Args: + request (Union[google.cloud.compute_v1.types.GetIamPolicySubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.GetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetIamPolicySubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetIamPolicySubnetworkRequest): + request = compute.GetIamPolicySubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertSubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + subnetwork_resource: compute.Subnetwork = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a subnetwork in the specified project using + the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertSubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetwork_resource (google.cloud.compute_v1.types.Subnetwork): + The body resource for this request + This corresponds to the ``subnetwork_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, subnetwork_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertSubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertSubnetworkRequest): + request = compute.InsertSubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if subnetwork_resource is not None: + request.subnetwork_resource = subnetwork_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListSubnetworksRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of subnetworks available to the + specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListSubnetworksRequest, dict]): + The request object. A request message for + Subnetworks.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.subnetworks.pagers.ListPager: + Contains a list of Subnetwork + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListSubnetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListSubnetworksRequest): + request = compute.ListSubnetworksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_usable(self, + request: Union[compute.ListUsableSubnetworksRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListUsablePager: + r"""Retrieves an aggregated list of all usable + subnetworks in the project. + + Args: + request (Union[google.cloud.compute_v1.types.ListUsableSubnetworksRequest, dict]): + The request object. A request message for + Subnetworks.ListUsable. See the method description for + details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.subnetworks.pagers.ListUsablePager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListUsableSubnetworksRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListUsableSubnetworksRequest): + request = compute.ListUsableSubnetworksRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_usable] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListUsablePager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchSubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + subnetwork: str = None, + subnetwork_resource: compute.Subnetwork = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified subnetwork with the data + included in the request. Only certain fields can be + updated with a patch request as indicated in the field + descriptions. You must specify the current fingerprint + of the subnetwork resource being patched. + + Args: + request (Union[google.cloud.compute_v1.types.PatchSubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetwork (str): + Name of the Subnetwork resource to + patch. + + This corresponds to the ``subnetwork`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetwork_resource (google.cloud.compute_v1.types.Subnetwork): + The body resource for this request + This corresponds to the ``subnetwork_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, subnetwork, subnetwork_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchSubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchSubnetworkRequest): + request = compute.PatchSubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if subnetwork is not None: + request.subnetwork = subnetwork + if subnetwork_resource is not None: + request.subnetwork_resource = subnetwork_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Union[compute.SetIamPolicySubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_policy_request_resource: compute.RegionSetPolicyRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Policy: + r"""Sets the access control policy on the specified + resource. Replaces any existing policy. + + Args: + request (Union[google.cloud.compute_v1.types.SetIamPolicySubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.SetIamPolicy. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + This corresponds to the ``region_set_policy_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + Policy is a collection of bindings. A binding binds one + or more members to a single role. Members can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A role is a named list of + permissions; each role can be an IAM predefined role or + a user-created custom role. For some types of Google + Cloud resources, a binding can also specify a condition, + which is a logical expression that allows access to a + resource only if the expression evaluates to true. A + condition can add constraints based on attributes of the + request, the resource, or both. To learn which resources + support conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:\ mike@example.com - + group:\ admins@example.com - domain:google.com - + serviceAccount:\ my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:\ eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, region_set_policy_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetIamPolicySubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetIamPolicySubnetworkRequest): + request = compute.SetIamPolicySubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_policy_request_resource is not None: + request.region_set_policy_request_resource = region_set_policy_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_private_ip_google_access(self, + request: Union[compute.SetPrivateIpGoogleAccessSubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + subnetwork: str = None, + subnetworks_set_private_ip_google_access_request_resource: compute.SubnetworksSetPrivateIpGoogleAccessRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Set whether VMs in this subnet can access Google + services without assigning external IP addresses through + Private Google Access. + + Args: + request (Union[google.cloud.compute_v1.types.SetPrivateIpGoogleAccessSubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.SetPrivateIpGoogleAccess. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetwork (str): + Name of the Subnetwork resource. + This corresponds to the ``subnetwork`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + subnetworks_set_private_ip_google_access_request_resource (google.cloud.compute_v1.types.SubnetworksSetPrivateIpGoogleAccessRequest): + The body resource for this request + This corresponds to the ``subnetworks_set_private_ip_google_access_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, subnetwork, subnetworks_set_private_ip_google_access_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetPrivateIpGoogleAccessSubnetworkRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetPrivateIpGoogleAccessSubnetworkRequest): + request = compute.SetPrivateIpGoogleAccessSubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if subnetwork is not None: + request.subnetwork = subnetwork + if subnetworks_set_private_ip_google_access_request_resource is not None: + request.subnetworks_set_private_ip_google_access_request_resource = subnetworks_set_private_ip_google_access_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_private_ip_google_access] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsSubnetworkRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. 
+ + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsSubnetworkRequest, dict]): + The request object. A request message for + Subnetworks.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsSubnetworkRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsSubnetworkRequest): + request = compute.TestIamPermissionsSubnetworkRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "SubnetworksClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/pagers.py new file mode 100644 index 000000000..a3ab35f34 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/pagers.py @@ -0,0 +1,198 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.SubnetworkAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.SubnetworkAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.SubnetworkAggregatedList], + request: compute.AggregatedListSubnetworksRequest, + response: compute.SubnetworkAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListSubnetworksRequest): + The initial request object. + response (google.cloud.compute_v1.types.SubnetworkAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.SubnetworkAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.SubnetworksScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.SubnetworksScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.SubnetworkList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.SubnetworkList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.SubnetworkList], + request: compute.ListSubnetworksRequest, + response: compute.SubnetworkList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListSubnetworksRequest): + The initial request object. + response (google.cloud.compute_v1.types.SubnetworkList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.SubnetworkList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Subnetwork]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListUsablePager: + """A pager for iterating through ``list_usable`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.UsableSubnetworksAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListUsable`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.UsableSubnetworksAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.UsableSubnetworksAggregatedList], + request: compute.ListUsableSubnetworksRequest, + response: compute.UsableSubnetworksAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListUsableSubnetworksRequest): + The initial request object. 
+ response (google.cloud.compute_v1.types.UsableSubnetworksAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListUsableSubnetworksRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.UsableSubnetworksAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.UsableSubnetwork]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/__init__.py new file mode 100644 index 000000000..4b21be238 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import SubnetworksTransport +from .rest import SubnetworksRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[SubnetworksTransport]] +_transport_registry['rest'] = SubnetworksRestTransport + +__all__ = ( + 'SubnetworksTransport', + 'SubnetworksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/base.py new file mode 100644 index 000000000..cd8fe7559 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/base.py @@ -0,0 +1,301 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class SubnetworksTransport(abc.ABC): + """Abstract transport class for Subnetworks.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.expand_ip_cidr_range: gapic_v1.method.wrap_method( + self.expand_ip_cidr_range, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.list_usable: gapic_v1.method.wrap_method( + self.list_usable, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_private_ip_google_access: gapic_v1.method.wrap_method( + self.set_private_ip_google_access, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListSubnetworksRequest], + Union[ + compute.SubnetworkAggregatedList, + Awaitable[compute.SubnetworkAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteSubnetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def expand_ip_cidr_range(self) -> Callable[ + [compute.ExpandIpCidrRangeSubnetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetSubnetworkRequest], + Union[ + compute.Subnetwork, + Awaitable[compute.Subnetwork] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicySubnetworkRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertSubnetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListSubnetworksRequest], + Union[ + compute.SubnetworkList, + Awaitable[compute.SubnetworkList] + ]]: + raise NotImplementedError() + + @property + def list_usable(self) -> Callable[ + [compute.ListUsableSubnetworksRequest], + Union[ + compute.UsableSubnetworksAggregatedList, + Awaitable[compute.UsableSubnetworksAggregatedList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchSubnetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicySubnetworkRequest], + Union[ + compute.Policy, + Awaitable[compute.Policy] + ]]: + raise NotImplementedError() + + @property + def 
set_private_ip_google_access(self) -> Callable[ + [compute.SetPrivateIpGoogleAccessSubnetworkRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsSubnetworkRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'SubnetworksTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/rest.py new file mode 100644 index 000000000..92d5348f5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/subnetworks/transports/rest.py @@ -0,0 +1,1497 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import SubnetworksTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class SubnetworksRestTransport(SubnetworksTransport): + """REST backend transport for Subnetworks. + + The Subnetworks API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListSubnetworksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SubnetworkAggregatedList: + r"""Call the aggregated list method over HTTP. 
+ + Args: + request (~.compute.AggregatedListSubnetworksRequest): + The request object. A request message for + Subnetworks.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SubnetworkAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/subnetworks', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListSubnetworksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListSubnetworksRequest.to_json( + compute.AggregatedListSubnetworksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SubnetworkAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteSubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteSubnetworkRequest): + The request object. A request message for + Subnetworks.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "subnetwork", + "subnetwork" + ), + ] + + request_kwargs = compute.DeleteSubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteSubnetworkRequest.to_json( + compute.DeleteSubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _expand_ip_cidr_range(self, + request: compute.ExpandIpCidrRangeSubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the expand ip cidr range method over HTTP. + + Args: + request (~.compute.ExpandIpCidrRangeSubnetworkRequest): + The request object. A request message for + Subnetworks.ExpandIpCidrRange. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange', + 'body': 'subnetworks_expand_ip_cidr_range_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "subnetwork", + "subnetwork" + ), + ] + + request_kwargs = compute.ExpandIpCidrRangeSubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SubnetworksExpandIpCidrRangeRequest.to_json( + compute.SubnetworksExpandIpCidrRangeRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ExpandIpCidrRangeSubnetworkRequest.to_json( + compute.ExpandIpCidrRangeSubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetSubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Subnetwork: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetSubnetworkRequest): + The request object. A request message for + Subnetworks.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Subnetwork: + Represents a Subnetwork resource. A + subnetwork (also known as a subnet) is a + logical partition of a Virtual Private + Cloud network with one primary IP range + and zero or more secondary IP ranges. + For more information, read Virtual + Private Cloud (VPC) Network. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "subnetwork", + "subnetwork" + ), + ] + + request_kwargs = compute.GetSubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetSubnetworkRequest.to_json( + compute.GetSubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Subnetwork.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_iam_policy(self, + request: compute.GetIamPolicySubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.compute.GetIamPolicySubnetworkRequest): + The request object. A request message for + Subnetworks.GetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.GetIamPolicySubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetIamPolicySubnetworkRequest.to_json( + compute.GetIamPolicySubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertSubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertSubnetworkRequest): + The request object. A request message for + Subnetworks.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks', + 'body': 'subnetwork_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertSubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Subnetwork.to_json( + compute.Subnetwork( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertSubnetworkRequest.to_json( + compute.InsertSubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListSubnetworksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.SubnetworkList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListSubnetworksRequest): + The request object. A request message for + Subnetworks.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.SubnetworkList: + Contains a list of Subnetwork + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListSubnetworksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListSubnetworksRequest.to_json( + compute.ListSubnetworksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.SubnetworkList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list_usable(self, + request: compute.ListUsableSubnetworksRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.UsableSubnetworksAggregatedList: + r"""Call the list usable method over HTTP. + + Args: + request (~.compute.ListUsableSubnetworksRequest): + The request object. A request message for + Subnetworks.ListUsable. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.UsableSubnetworksAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/subnetworks/listUsable', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListUsableSubnetworksRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListUsableSubnetworksRequest.to_json( + compute.ListUsableSubnetworksRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.UsableSubnetworksAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchSubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchSubnetworkRequest): + The request object. A request message for + Subnetworks.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}', + 'body': 'subnetwork_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "subnetwork", + "subnetwork" + ), + ] + + request_kwargs = compute.PatchSubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.Subnetwork.to_json( + compute.Subnetwork( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchSubnetworkRequest.to_json( + compute.PatchSubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_iam_policy(self, + request: compute.SetIamPolicySubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.compute.SetIamPolicySubnetworkRequest): + The request object. A request message for + Subnetworks.SetIamPolicy. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. A + ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members`` to a single + ``role``. Members can be user accounts, service + accounts, Google groups, and domains (such as G Suite). + A ``role`` is a named list of permissions; each ``role`` + can be an IAM predefined role or a user-created custom + role. For some types of Google Cloud resources, a + ``binding`` can also specify a ``condition``, which is a + logical expression that allows access to a resource only + if the expression evaluates to ``true``. A condition can + add constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] }, { "role": + "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": + "expirable access", "description": "Does not grant + access after Sep 2020", "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** + bindings: - members: - user:mike@example.com - + group:admins@example.com - domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin - members: + - user:eve@example.com role: + roles/resourcemanager.organizationViewer condition: + title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, + see the `IAM + documentation `__. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{resource}/setIamPolicy', + 'body': 'region_set_policy_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetIamPolicySubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetPolicyRequest.to_json( + compute.RegionSetPolicyRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetIamPolicySubnetworkRequest.to_json( + compute.SetIamPolicySubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Policy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_private_ip_google_access(self, + request: compute.SetPrivateIpGoogleAccessSubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set private ip google + access method over HTTP. + + Args: + request (~.compute.SetPrivateIpGoogleAccessSubnetworkRequest): + The request object. A request message for + Subnetworks.SetPrivateIpGoogleAccess. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess', + 'body': 'subnetworks_set_private_ip_google_access_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "subnetwork", + "subnetwork" + ), + ] + + request_kwargs = compute.SetPrivateIpGoogleAccessSubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SubnetworksSetPrivateIpGoogleAccessRequest.to_json( + compute.SubnetworksSetPrivateIpGoogleAccessRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetPrivateIpGoogleAccessSubnetworkRequest.to_json( + compute.SetPrivateIpGoogleAccessSubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsSubnetworkRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsSubnetworkRequest): + The request object. A request message for + Subnetworks.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsSubnetworkRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsSubnetworkRequest.to_json( + compute.TestIamPermissionsSubnetworkRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListSubnetworksRequest], + compute.SubnetworkAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteSubnetworkRequest], + compute.Operation]: + return self._delete + @ property + def expand_ip_cidr_range(self) -> Callable[ + [compute.ExpandIpCidrRangeSubnetworkRequest], + compute.Operation]: + return self._expand_ip_cidr_range + @ property + def get(self) -> Callable[ + [compute.GetSubnetworkRequest], + compute.Subnetwork]: + return self._get + @ property + def get_iam_policy(self) -> Callable[ + [compute.GetIamPolicySubnetworkRequest], + compute.Policy]: + return self._get_iam_policy + @ property + def insert(self) -> Callable[ + [compute.InsertSubnetworkRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListSubnetworksRequest], + compute.SubnetworkList]: + return self._list + @ property + def list_usable(self) -> Callable[ + 
[compute.ListUsableSubnetworksRequest], + compute.UsableSubnetworksAggregatedList]: + return self._list_usable + @ property + def patch(self) -> Callable[ + [compute.PatchSubnetworkRequest], + compute.Operation]: + return self._patch + @ property + def set_iam_policy(self) -> Callable[ + [compute.SetIamPolicySubnetworkRequest], + compute.Policy]: + return self._set_iam_policy + @ property + def set_private_ip_google_access(self) -> Callable[ + [compute.SetPrivateIpGoogleAccessSubnetworkRequest], + compute.Operation]: + return self._set_private_ip_google_access + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsSubnetworkRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'SubnetworksRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/__init__.py new file mode 100644 index 000000000..8cf323964 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import TargetGrpcProxiesClient + +__all__ = ( + 'TargetGrpcProxiesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/client.py new file mode 100644 index 000000000..0012539c5 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/client.py @@ -0,0 +1,793 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_grpc_proxies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetGrpcProxiesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetGrpcProxiesRestTransport + + +class TargetGrpcProxiesClientMeta(type): + """Metaclass for the TargetGrpcProxies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetGrpcProxiesTransport]] + _transport_registry["rest"] = TargetGrpcProxiesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetGrpcProxiesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TargetGrpcProxiesClient(metaclass=TargetGrpcProxiesClientMeta): + """The TargetGrpcProxies API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetGrpcProxiesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetGrpcProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TargetGrpcProxiesTransport: + """Returns the transport used by the client instance. + + Returns: + TargetGrpcProxiesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TargetGrpcProxiesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the target grpc proxies client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TargetGrpcProxiesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetGrpcProxiesTransport): + # transport is a TargetGrpcProxiesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteTargetGrpcProxyRequest, dict] = None, + *, + project: str = None, + target_grpc_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetGrpcProxy in the given + scope + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetGrpcProxyRequest, dict]): + The request object. A request message for + TargetGrpcProxies.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_grpc_proxy (str): + Name of the TargetGrpcProxy resource + to delete. 
+ + This corresponds to the ``target_grpc_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_grpc_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetGrpcProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.DeleteTargetGrpcProxyRequest): + request = compute.DeleteTargetGrpcProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_grpc_proxy is not None: + request.target_grpc_proxy = target_grpc_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetTargetGrpcProxyRequest, dict] = None, + *, + project: str = None, + target_grpc_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetGrpcProxy: + r"""Returns the specified TargetGrpcProxy resource in the + given scope. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetGrpcProxyRequest, dict]): + The request object. A request message for + TargetGrpcProxies.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_grpc_proxy (str): + Name of the TargetGrpcProxy resource + to return. + + This corresponds to the ``target_grpc_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.TargetGrpcProxy: + Represents a Target gRPC Proxy resource. A target gRPC + proxy is a component of load balancers intended for load + balancing gRPC traffic. Only global forwarding rules + with load balancing scheme INTERNAL_SELF_MANAGED can + reference a target gRPC proxy. The target gRPC Proxy + references a URL map that specifies how traffic is + routed to gRPC backend services. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_grpc_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetGrpcProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetTargetGrpcProxyRequest): + request = compute.GetTargetGrpcProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_grpc_proxy is not None: + request.target_grpc_proxy = target_grpc_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def insert(self, + request: Union[compute.InsertTargetGrpcProxyRequest, dict] = None, + *, + project: str = None, + target_grpc_proxy_resource: compute.TargetGrpcProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetGrpcProxy in the specified project in + the given scope using the parameters that are included + in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetGrpcProxyRequest, dict]): + The request object. A request message for + TargetGrpcProxies.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_grpc_proxy_resource (google.cloud.compute_v1.types.TargetGrpcProxy): + The body resource for this request + This corresponds to the ``target_grpc_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. 
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_grpc_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetGrpcProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertTargetGrpcProxyRequest): + request = compute.InsertTargetGrpcProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_grpc_proxy_resource is not None: + request.target_grpc_proxy_resource = target_grpc_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetGrpcProxiesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Lists the TargetGrpcProxies for a project in the + given scope. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetGrpcProxiesRequest, dict]): + The request object. 
A request message for + TargetGrpcProxies.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_grpc_proxies.pagers.ListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetGrpcProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetGrpcProxiesRequest): + request = compute.ListTargetGrpcProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchTargetGrpcProxyRequest, dict] = None, + *, + project: str = None, + target_grpc_proxy: str = None, + target_grpc_proxy_resource: compute.TargetGrpcProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified TargetGrpcProxy resource with + the data included in the request. This method supports + PATCH semantics and uses JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchTargetGrpcProxyRequest, dict]): + The request object. A request message for + TargetGrpcProxies.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_grpc_proxy (str): + Name of the TargetGrpcProxy resource + to patch. + + This corresponds to the ``target_grpc_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_grpc_proxy_resource (google.cloud.compute_v1.types.TargetGrpcProxy): + The body resource for this request + This corresponds to the ``target_grpc_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_grpc_proxy, target_grpc_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchTargetGrpcProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchTargetGrpcProxyRequest): + request = compute.PatchTargetGrpcProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_grpc_proxy is not None: + request.target_grpc_proxy = target_grpc_proxy + if target_grpc_proxy_resource is not None: + request.target_grpc_proxy_resource = target_grpc_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetGrpcProxiesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/pagers.py new file mode 100644 index 000000000..9bb36d642 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetGrpcProxyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.TargetGrpcProxyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetGrpcProxyList], + request: compute.ListTargetGrpcProxiesRequest, + response: compute.TargetGrpcProxyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListTargetGrpcProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetGrpcProxyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListTargetGrpcProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetGrpcProxyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetGrpcProxy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/__init__.py new file mode 100644 index 000000000..258b8035b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TargetGrpcProxiesTransport +from .rest import TargetGrpcProxiesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[TargetGrpcProxiesTransport]] +_transport_registry['rest'] = TargetGrpcProxiesRestTransport + +__all__ = ( + 'TargetGrpcProxiesTransport', + 'TargetGrpcProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/base.py new file mode 100644 index 000000000..ef720ea7e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TargetGrpcProxiesTransport(abc.ABC): + """Abstract transport class for TargetGrpcProxies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteTargetGrpcProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetTargetGrpcProxyRequest], + Union[ + compute.TargetGrpcProxy, + Awaitable[compute.TargetGrpcProxy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertTargetGrpcProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListTargetGrpcProxiesRequest], + Union[ + compute.TargetGrpcProxyList, + Awaitable[compute.TargetGrpcProxyList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchTargetGrpcProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'TargetGrpcProxiesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/rest.py 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/rest.py new file mode 100644 index 000000000..5114e8e6f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_grpc_proxies/transports/rest.py @@ -0,0 +1,658 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import TargetGrpcProxiesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class TargetGrpcProxiesRestTransport(TargetGrpcProxiesTransport): + """REST backend transport for TargetGrpcProxies. + + The TargetGrpcProxies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteTargetGrpcProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteTargetGrpcProxyRequest): + The request object. A request message for + TargetGrpcProxies.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
+                Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+
+        """
+
+        http_options = [
+            {
+                'method': 'delete',
+                'uri': '/compute/v1/projects/{project}/global/targetGrpcProxies/{target_grpc_proxy}',
+            },
+        ]
+
+        required_fields = [
+            # (snake_case_name, camel_case_name)
+            (
+                "project",
+                "project"
+            ),
+            (
+                "target_grpc_proxy",
+                "targetGrpcProxy"
+            ),
+        ]
+
+        request_kwargs = compute.DeleteTargetGrpcProxyRequest.to_dict(request)
+        transcoded_request = path_template.transcode(
+            http_options, **request_kwargs)
+
+        uri = transcoded_request['uri']
+        method = transcoded_request['method']
+
+        # Jsonify the query params
+        query_params = json.loads(compute.DeleteTargetGrpcProxyRequest.to_json(
+            compute.DeleteTargetGrpcProxyRequest(transcoded_request['query_params']),
+            including_default_value_fields=False,
+            use_integers_for_enums=False
+        ))
+
+        # Ensure required fields have values in query_params.
+        # If a required field has a default value, it can get lost
+        # by the to_json call above.
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetTargetGrpcProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetGrpcProxy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetTargetGrpcProxyRequest): + The request object. A request message for + TargetGrpcProxies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetGrpcProxy: + Represents a Target gRPC Proxy resource. A target gRPC + proxy is a component of load balancers intended for load + balancing gRPC traffic. Only global forwarding rules + with load balancing scheme INTERNAL_SELF_MANAGED can + reference a target gRPC proxy. 
The target gRPC Proxy + references a URL map that specifies how traffic is + routed to gRPC backend services. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetGrpcProxies/{target_grpc_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_grpc_proxy", + "targetGrpcProxy" + ), + ] + + request_kwargs = compute.GetTargetGrpcProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetTargetGrpcProxyRequest.to_json( + compute.GetTargetGrpcProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
`Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetGrpcProxies', + 'body': 'target_grpc_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertTargetGrpcProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetGrpcProxy.to_json( + compute.TargetGrpcProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertTargetGrpcProxyRequest.to_json( + compute.InsertTargetGrpcProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListTargetGrpcProxiesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetGrpcProxyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListTargetGrpcProxiesRequest): + The request object. A request message for + TargetGrpcProxies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetGrpcProxyList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetGrpcProxies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListTargetGrpcProxiesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListTargetGrpcProxiesRequest.to_json( + compute.ListTargetGrpcProxiesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
`Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/targetGrpcProxies/{target_grpc_proxy}', + 'body': 'target_grpc_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_grpc_proxy", + "targetGrpcProxy" + ), + ] + + request_kwargs = compute.PatchTargetGrpcProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetGrpcProxy.to_json( + compute.TargetGrpcProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchTargetGrpcProxyRequest.to_json( + compute.PatchTargetGrpcProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteTargetGrpcProxyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetTargetGrpcProxyRequest], + compute.TargetGrpcProxy]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertTargetGrpcProxyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListTargetGrpcProxiesRequest], + compute.TargetGrpcProxyList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchTargetGrpcProxyRequest], + compute.Operation]: + return self._patch + def close(self): + self._session.close() + + +__all__=( + 'TargetGrpcProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/__init__.py new file mode 100644 index 000000000..b50342a01 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import TargetHttpProxiesClient + +__all__ = ( + 'TargetHttpProxiesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/client.py new file mode 100644 index 000000000..2bdcfc380 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/client.py @@ -0,0 +1,975 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_http_proxies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetHttpProxiesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetHttpProxiesRestTransport + + +class TargetHttpProxiesClientMeta(type): + """Metaclass for the TargetHttpProxies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetHttpProxiesTransport]] + _transport_registry["rest"] = TargetHttpProxiesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetHttpProxiesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetHttpProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TargetHttpProxiesTransport: + """Returns the transport used by the client instance. + + Returns: + TargetHttpProxiesTransport: The transport used by the client + instance. 
r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path)
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$"
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetHttpProxiesTransport): + # transport is a TargetHttpProxiesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListTargetHttpProxiesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of all TargetHttpProxy resources, + regional and global, available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListTargetHttpProxiesRequest, dict]): + The request object. A request message for + TargetHttpProxies.AggregatedList. See the method + description for details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_http_proxies.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListTargetHttpProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListTargetHttpProxiesRequest): + request = compute.AggregatedListTargetHttpProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + target_http_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetHttpProxy resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetHttpProxyRequest, dict]): + The request object. A request message for + TargetHttpProxies.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy (str): + Name of the TargetHttpProxy resource + to delete. + + This corresponds to the ``target_http_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_http_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetHttpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteTargetHttpProxyRequest): + request = compute.DeleteTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_http_proxy is not None: + request.target_http_proxy = target_http_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + target_http_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetHttpProxy: + r"""Returns the specified TargetHttpProxy resource. Gets + a list of available target HTTP proxies by making a + list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetHttpProxyRequest, dict]): + The request object. A request message for + TargetHttpProxies.Get. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy (str): + Name of the TargetHttpProxy resource + to return. + + This corresponds to the ``target_http_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetHttpProxy: + Represents a Target HTTP Proxy resource. Google Compute + Engine has two Target HTTP Proxy resources: \* + [Global](/compute/docs/reference/rest/v1/targetHttpProxies) + \* + [Regional](/compute/docs/reference/rest/v1/regionTargetHttpProxies) + A target HTTP proxy is a component of GCP HTTP load + balancers. \* targetHttpProxies are used by external + HTTP load balancers and Traffic Director. \* + regionTargetHttpProxies are used by internal HTTP load + balancers. Forwarding rules reference a target HTTP + proxy, and the target proxy then references a URL map. + For more information, read Using Target Proxies and + Forwarding rule concepts. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_http_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetHttpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetTargetHttpProxyRequest): + request = compute.GetTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_http_proxy is not None: + request.target_http_proxy = target_http_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + target_http_proxy_resource: compute.TargetHttpProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetHttpProxy resource in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetHttpProxyRequest, dict]): + The request object. A request message for + TargetHttpProxies.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy_resource (google.cloud.compute_v1.types.TargetHttpProxy): + The body resource for this request + This corresponds to the ``target_http_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_http_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetHttpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertTargetHttpProxyRequest): + request = compute.InsertTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_http_proxy_resource is not None: + request.target_http_proxy_resource = target_http_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetHttpProxiesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of TargetHttpProxy resources + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetHttpProxiesRequest, dict]): + The request object. A request message for + TargetHttpProxies.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_http_proxies.pagers.ListPager: + A list of TargetHttpProxy resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetHttpProxiesRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetHttpProxiesRequest): + request = compute.ListTargetHttpProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + target_http_proxy: str = None, + target_http_proxy_resource: compute.TargetHttpProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified TargetHttpProxy resource with + the data included in the request. This method supports + PATCH semantics and uses JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchTargetHttpProxyRequest, dict]): + The request object. A request message for + TargetHttpProxies.Patch. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy (str): + Name of the TargetHttpProxy resource + to patch. 
+ + This corresponds to the ``target_http_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy_resource (google.cloud.compute_v1.types.TargetHttpProxy): + The body resource for this request + This corresponds to the ``target_http_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_http_proxy, target_http_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchTargetHttpProxyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchTargetHttpProxyRequest): + request = compute.PatchTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_http_proxy is not None: + request.target_http_proxy = target_http_proxy + if target_http_proxy_resource is not None: + request.target_http_proxy_resource = target_http_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_url_map(self, + request: Union[compute.SetUrlMapTargetHttpProxyRequest, dict] = None, + *, + project: str = None, + target_http_proxy: str = None, + url_map_reference_resource: compute.UrlMapReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the URL map for TargetHttpProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetUrlMapTargetHttpProxyRequest, dict]): + The request object. A request message for + TargetHttpProxies.SetUrlMap. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_http_proxy (str): + Name of the TargetHttpProxy to set a + URL map for. + + This corresponds to the ``target_http_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + This corresponds to the ``url_map_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_http_proxy, url_map_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetUrlMapTargetHttpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.SetUrlMapTargetHttpProxyRequest): + request = compute.SetUrlMapTargetHttpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_http_proxy is not None: + request.target_http_proxy = target_http_proxy + if url_map_reference_resource is not None: + request.url_map_reference_resource = url_map_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_url_map] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetHttpProxiesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/pagers.py new file mode 100644 index 000000000..334050256 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_http_proxies/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetHttpProxyAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.TargetHttpProxyAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetHttpProxyAggregatedList], + request: compute.AggregatedListTargetHttpProxiesRequest, + response: compute.TargetHttpProxyAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListTargetHttpProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetHttpProxyAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListTargetHttpProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetHttpProxyAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.TargetHttpProxiesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.TargetHttpProxiesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
class ListPager:
    """Pager for ``list`` responses.

    Thinly wraps an initial
    :class:`google.cloud.compute_v1.types.TargetHttpProxyList` and iterates
    its ``items`` field, issuing further ``List`` requests as needed to
    walk every page.

    Attribute access is proxied to the most recently fetched response.
    """

    def __init__(self,
            method: Callable[..., compute.TargetHttpProxyList],
            request: compute.ListTargetHttpProxiesRequest,
            response: compute.TargetHttpProxyList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The originally-called RPC that produced
                (and will continue producing) pages.
            request (google.cloud.compute_v1.types.ListTargetHttpProxiesRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.TargetHttpProxyList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with each page request as metadata.
        """
        self._method = method
        # Copy the request so page_token mutations never touch the caller's
        # object.
        self._request = compute.ListTargetHttpProxiesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.TargetHttpProxyList]:
        # Yield the page in hand, then keep fetching while the service
        # reports another page token.
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                return
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)

    def __iter__(self) -> Iterator[compute.TargetHttpProxy]:
        # Flatten the proxies across all pages.
        return (item
                for page in self.pages
                for item in page.items)

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
from collections import OrderedDict
from typing import Dict, Type

from .base import TargetHttpProxiesTransport
from .rest import TargetHttpProxiesRestTransport


# Registry mapping transport names to their implementing classes; only a
# REST transport exists for this API.
_transport_registry = OrderedDict(
    [('rest', TargetHttpProxiesRestTransport)],
)  # type: Dict[str, Type[TargetHttpProxiesTransport]]

__all__ = (
    'TargetHttpProxiesTransport',
    'TargetHttpProxiesRestTransport',
)
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Running from a source tree that was never pip-installed; fall back
    # to a version-less client info.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class TargetHttpProxiesTransport(abc.ABC):
    """Abstract transport class for TargetHttpProxies.

    Concrete subclasses implement the RPC properties below; this base
    class owns credential resolution and per-method wrapping.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Authorization credentials to attach to requests; if none are
                given they are resolved from the environment.
            credentials_file (Optional[str]): A file loadable with
                :func:`google.auth.load_credentials_from_file`; mutually
                exclusive with ``credentials``.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for
                billing and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used to send a user-agent string with API
                requests; defaults are used when ``None``.
            always_use_jwt_access (Optional[bool]): Whether self-signed JWT
                should be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Normalize the host: default to port 443 (HTTPS) when none given.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Remember the caller-requested scopes.
        self._scopes = scopes

        # Resolve credentials: explicit object > credentials file >
        # application default credentials.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # For service account credentials, prefer self-signed JWT when
        # requested and supported by the installed google-auth version.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_methods(self, client_info):
        # Every RPC here gets identical wrapping (no default timeout), so
        # build the table in one pass rather than enumerating entries.
        self._wrapped_methods = {
            rpc: gapic_v1.method.wrap_method(
                rpc,
                default_timeout=None,
                client_info=client_info,
            )
            for rpc in (
                self.aggregated_list,
                self.delete,
                self.get,
                self.insert,
                self.list,
                self.patch,
                self.set_url_map,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
            Only call this method if the transport is NOT shared
            with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    # -- Abstract RPC surface; concrete transports supply callables. -----

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListTargetHttpProxiesRequest],
            Union[
                compute.TargetHttpProxyAggregatedList,
                Awaitable[compute.TargetHttpProxyAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteTargetHttpProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetTargetHttpProxyRequest],
            Union[
                compute.TargetHttpProxy,
                Awaitable[compute.TargetHttpProxy]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertTargetHttpProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListTargetHttpProxiesRequest],
            Union[
                compute.TargetHttpProxyList,
                Awaitable[compute.TargetHttpProxyList]
            ]]:
        raise NotImplementedError()

    @property
    def patch(self) -> Callable[
            [compute.PatchTargetHttpProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def set_url_map(self) -> Callable[
            [compute.SetUrlMapTargetHttpProxyRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()


__all__ = (
    'TargetHttpProxiesTransport',
)
google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import TargetHttpProxiesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class TargetHttpProxiesRestTransport(TargetHttpProxiesTransport): + """REST backend transport for TargetHttpProxies. + + The TargetHttpProxies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListTargetHttpProxiesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpProxyAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListTargetHttpProxiesRequest): + The request object. A request message for + TargetHttpProxies.AggregatedList. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TargetHttpProxyAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/targetHttpProxies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListTargetHttpProxiesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListTargetHttpProxiesRequest.to_json( + compute.AggregatedListTargetHttpProxiesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpProxyAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteTargetHttpProxyRequest): + The request object. A request message for + TargetHttpProxies.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/targetHttpProxies/{target_http_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_http_proxy", + "targetHttpProxy" + ), + ] + + request_kwargs = compute.DeleteTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteTargetHttpProxyRequest.to_json( + compute.DeleteTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpProxy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetTargetHttpProxyRequest): + The request object. A request message for + TargetHttpProxies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetHttpProxy: + Represents a Target HTTP Proxy resource. Google Compute + Engine has two Target HTTP Proxy resources: \* + `Global `__ + \* + `Regional `__ + A target HTTP proxy is a component of GCP HTTP load + balancers. \* targetHttpProxies are used by external + HTTP load balancers and Traffic Director. \* + regionTargetHttpProxies are used by internal HTTP load + balancers. Forwarding rules reference a target HTTP + proxy, and the target proxy then references a URL map. + For more information, read Using Target Proxies and + Forwarding rule concepts. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetHttpProxies/{target_http_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_http_proxy", + "targetHttpProxy" + ), + ] + + request_kwargs = compute.GetTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetTargetHttpProxyRequest.to_json( + compute.GetTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpProxy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertTargetHttpProxyRequest): + The request object. A request message for + TargetHttpProxies.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetHttpProxies', + 'body': 'target_http_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetHttpProxy.to_json( + compute.TargetHttpProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertTargetHttpProxyRequest.to_json( + compute.InsertTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListTargetHttpProxiesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpProxyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListTargetHttpProxiesRequest): + The request object. A request message for + TargetHttpProxies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetHttpProxyList: + A list of TargetHttpProxy resources. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetHttpProxies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListTargetHttpProxiesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListTargetHttpProxiesRequest.to_json( + compute.ListTargetHttpProxiesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpProxyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchTargetHttpProxyRequest): + The request object. A request message for + TargetHttpProxies.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/targetHttpProxies/{target_http_proxy}', + 'body': 'target_http_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_http_proxy", + "targetHttpProxy" + ), + ] + + request_kwargs = compute.PatchTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetHttpProxy.to_json( + compute.TargetHttpProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchTargetHttpProxyRequest.to_json( + compute.PatchTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_url_map(self, + request: compute.SetUrlMapTargetHttpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set url map method over HTTP. + + Args: + request (~.compute.SetUrlMapTargetHttpProxyRequest): + The request object. A request message for + TargetHttpProxies.SetUrlMap. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/targetHttpProxies/{target_http_proxy}/setUrlMap', + 'body': 'url_map_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_http_proxy", + "targetHttpProxy" + ), + ] + + request_kwargs = compute.SetUrlMapTargetHttpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMapReference.to_json( + compute.UrlMapReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetUrlMapTargetHttpProxyRequest.to_json( + compute.SetUrlMapTargetHttpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListTargetHttpProxiesRequest], + compute.TargetHttpProxyAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteTargetHttpProxyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetTargetHttpProxyRequest], + compute.TargetHttpProxy]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertTargetHttpProxyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListTargetHttpProxiesRequest], + compute.TargetHttpProxyList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchTargetHttpProxyRequest], + compute.Operation]: + return self._patch + @ property + def set_url_map(self) -> Callable[ + [compute.SetUrlMapTargetHttpProxyRequest], + compute.Operation]: + return self._set_url_map + def close(self): + self._session.close() + + +__all__=( + 
'TargetHttpProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/__init__.py new file mode 100644 index 000000000..b0684b6bb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import TargetHttpsProxiesClient + +__all__ = ( + 'TargetHttpsProxiesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/client.py new file mode 100644 index 000000000..834fd5168 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/client.py @@ -0,0 +1,1273 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_https_proxies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetHttpsProxiesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetHttpsProxiesRestTransport + + +class TargetHttpsProxiesClientMeta(type): + """Metaclass for the TargetHttpsProxies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetHttpsProxiesTransport]] + _transport_registry["rest"] = TargetHttpsProxiesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetHttpsProxiesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. 
+ """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TargetHttpsProxiesClient(metaclass=TargetHttpsProxiesClientMeta): + """The TargetHttpsProxies API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetHttpsProxiesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetHttpsProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TargetHttpsProxiesTransport: + """Returns the transport used by the client instance. + + Returns: + TargetHttpsProxiesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TargetHttpsProxiesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the target https proxies client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TargetHttpsProxiesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetHttpsProxiesTransport): + # transport is a TargetHttpsProxiesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListTargetHttpsProxiesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of all TargetHttpsProxy resources, + regional and global, available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListTargetHttpsProxiesRequest, dict]): + The request object. A request message for + TargetHttpsProxies.AggregatedList. See the method + description for details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_https_proxies.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListTargetHttpsProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListTargetHttpsProxiesRequest): + request = compute.AggregatedListTargetHttpsProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetHttpsProxy resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.Delete. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + to delete. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. 
For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_https_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteTargetHttpsProxyRequest): + request = compute.DeleteTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetHttpsProxy: + r"""Returns the specified TargetHttpsProxy resource. Gets + a list of available target HTTPS proxies by making a + list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.Get. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + to return. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetHttpsProxy: + Represents a Target HTTPS Proxy resource. Google Compute + Engine has two Target HTTPS Proxy resources: \* + [Global](/compute/docs/reference/rest/v1/targetHttpsProxies) + \* + [Regional](/compute/docs/reference/rest/v1/regionTargetHttpsProxies) + A target HTTPS proxy is a component of GCP HTTPS load + balancers. \* targetHttpsProxies are used by external + HTTPS load balancers. \* regionTargetHttpsProxies are + used by internal HTTPS load balancers. Forwarding rules + reference a target HTTPS proxy, and the target proxy + then references a URL map. For more information, read + Using Target Proxies and Forwarding rule concepts. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_https_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetHttpsProxyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetTargetHttpsProxyRequest): + request = compute.GetTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy_resource: compute.TargetHttpsProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetHttpsProxy resource in the specified + project using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.Insert. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy_resource (google.cloud.compute_v1.types.TargetHttpsProxy): + The body resource for this request + This corresponds to the ``target_https_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_https_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertTargetHttpsProxyRequest): + request = compute.InsertTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if target_https_proxy_resource is not None: + request.target_https_proxy_resource = target_https_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetHttpsProxiesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of TargetHttpsProxy resources + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetHttpsProxiesRequest, dict]): + The request object. A request message for + TargetHttpsProxies.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_https_proxies.pagers.ListPager: + Contains a list of TargetHttpsProxy + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetHttpsProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetHttpsProxiesRequest): + request = compute.ListTargetHttpsProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy: str = None, + target_https_proxy_resource: compute.TargetHttpsProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified TargetHttpsProxy resource with + the data included in the request. This method supports + PATCH semantics and uses JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.Patch. 
See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + to patch. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy_resource (google.cloud.compute_v1.types.TargetHttpsProxy): + The body resource for this request + This corresponds to the ``target_https_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, target_https_proxy, target_https_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.PatchTargetHttpsProxyRequest): + request = compute.PatchTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + if target_https_proxy_resource is not None: + request.target_https_proxy_resource = target_https_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_quic_override(self, + request: Union[compute.SetQuicOverrideTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy: str = None, + target_https_proxies_set_quic_override_request_resource: compute.TargetHttpsProxiesSetQuicOverrideRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the QUIC override policy for TargetHttpsProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetQuicOverrideTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.SetQuicOverride. See the method + description for details. 
+ project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + to set the QUIC override policy for. The + name should conform to RFC1035. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxies_set_quic_override_request_resource (google.cloud.compute_v1.types.TargetHttpsProxiesSetQuicOverrideRequest): + The body resource for this request + This corresponds to the ``target_https_proxies_set_quic_override_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. 
+ # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_https_proxy, target_https_proxies_set_quic_override_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetQuicOverrideTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetQuicOverrideTargetHttpsProxyRequest): + request = compute.SetQuicOverrideTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + if target_https_proxies_set_quic_override_request_resource is not None: + request.target_https_proxies_set_quic_override_request_resource = target_https_proxies_set_quic_override_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_quic_override] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_ssl_certificates(self, + request: Union[compute.SetSslCertificatesTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy: str = None, + target_https_proxies_set_ssl_certificates_request_resource: compute.TargetHttpsProxiesSetSslCertificatesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Replaces SslCertificates for TargetHttpsProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetSslCertificatesTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.SetSslCertificates. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + to set an SslCertificates resource for. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxies_set_ssl_certificates_request_resource (google.cloud.compute_v1.types.TargetHttpsProxiesSetSslCertificatesRequest): + The body resource for this request + This corresponds to the ``target_https_proxies_set_ssl_certificates_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_https_proxy, target_https_proxies_set_ssl_certificates_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetSslCertificatesTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetSslCertificatesTargetHttpsProxyRequest): + request = compute.SetSslCertificatesTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + if target_https_proxies_set_ssl_certificates_request_resource is not None: + request.target_https_proxies_set_ssl_certificates_request_resource = target_https_proxies_set_ssl_certificates_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_ssl_certificates] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_ssl_policy(self, + request: Union[compute.SetSslPolicyTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy: str = None, + ssl_policy_reference_resource: compute.SslPolicyReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the SSL policy for TargetHttpsProxy. The SSL + policy specifies the server-side support for SSL + features. This affects connections between clients and + the HTTPS proxy load balancer. They do not affect the + connection between the load balancer and the backends. + + Args: + request (Union[google.cloud.compute_v1.types.SetSslPolicyTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.SetSslPolicy. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + whose SSL policy is to be set. The name + must be 1-63 characters long, and comply + with RFC1035. 
+ + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + ssl_policy_reference_resource (google.cloud.compute_v1.types.SslPolicyReference): + The body resource for this request + This corresponds to the ``ssl_policy_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, target_https_proxy, ssl_policy_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetSslPolicyTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetSslPolicyTargetHttpsProxyRequest): + request = compute.SetSslPolicyTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + if ssl_policy_reference_resource is not None: + request.ssl_policy_reference_resource = ssl_policy_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_ssl_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_url_map(self, + request: Union[compute.SetUrlMapTargetHttpsProxyRequest, dict] = None, + *, + project: str = None, + target_https_proxy: str = None, + url_map_reference_resource: compute.UrlMapReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the URL map for TargetHttpsProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetUrlMapTargetHttpsProxyRequest, dict]): + The request object. A request message for + TargetHttpsProxies.SetUrlMap. See the method description + for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_https_proxy (str): + Name of the TargetHttpsProxy resource + whose URL map is to be set. + + This corresponds to the ``target_https_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + This corresponds to the ``url_map_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, target_https_proxy, url_map_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetUrlMapTargetHttpsProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetUrlMapTargetHttpsProxyRequest): + request = compute.SetUrlMapTargetHttpsProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_https_proxy is not None: + request.target_https_proxy = target_https_proxy + if url_map_reference_resource is not None: + request.url_map_reference_resource = url_map_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_url_map] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetHttpsProxiesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/pagers.py new file mode 100644 index 000000000..1f85ca3ce --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetHttpsProxyAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.TargetHttpsProxyAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetHttpsProxyAggregatedList], + request: compute.AggregatedListTargetHttpsProxiesRequest, + response: compute.TargetHttpsProxyAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListTargetHttpsProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetHttpsProxyAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListTargetHttpsProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetHttpsProxyAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.TargetHttpsProxiesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.TargetHttpsProxiesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetHttpsProxyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.TargetHttpsProxyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetHttpsProxyList], + request: compute.ListTargetHttpsProxiesRequest, + response: compute.TargetHttpsProxyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListTargetHttpsProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetHttpsProxyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListTargetHttpsProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetHttpsProxyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetHttpsProxy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/__init__.py new file mode 100644 index 000000000..ee155a431 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TargetHttpsProxiesTransport +from .rest import TargetHttpsProxiesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[TargetHttpsProxiesTransport]] +_transport_registry['rest'] = TargetHttpsProxiesRestTransport + +__all__ = ( + 'TargetHttpsProxiesTransport', + 'TargetHttpsProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/base.py new file mode 100644 index 000000000..a4de6d4f1 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/base.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TargetHttpsProxiesTransport(abc.ABC): + """Abstract transport class for TargetHttpsProxies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.set_quic_override: gapic_v1.method.wrap_method( + self.set_quic_override, + default_timeout=None, + client_info=client_info, + ), + self.set_ssl_certificates: gapic_v1.method.wrap_method( + self.set_ssl_certificates, + default_timeout=None, + client_info=client_info, + ), + self.set_ssl_policy: gapic_v1.method.wrap_method( + self.set_ssl_policy, + default_timeout=None, + client_info=client_info, + ), + self.set_url_map: gapic_v1.method.wrap_method( + self.set_url_map, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListTargetHttpsProxiesRequest], + Union[ + compute.TargetHttpsProxyAggregatedList, + Awaitable[compute.TargetHttpsProxyAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteTargetHttpsProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetTargetHttpsProxyRequest], + Union[ + compute.TargetHttpsProxy, + Awaitable[compute.TargetHttpsProxy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertTargetHttpsProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListTargetHttpsProxiesRequest], + Union[ + compute.TargetHttpsProxyList, + Awaitable[compute.TargetHttpsProxyList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchTargetHttpsProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_quic_override(self) -> Callable[ + [compute.SetQuicOverrideTargetHttpsProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_ssl_certificates(self) -> Callable[ + [compute.SetSslCertificatesTargetHttpsProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_ssl_policy(self) -> Callable[ + [compute.SetSslPolicyTargetHttpsProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_url_map(self) -> Callable[ + [compute.SetUrlMapTargetHttpsProxyRequest], + Union[ + compute.Operation, + 
Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'TargetHttpsProxiesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/rest.py new file mode 100644 index 000000000..56d159245 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_https_proxies/transports/rest.py @@ -0,0 +1,1227 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import TargetHttpsProxiesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class TargetHttpsProxiesRestTransport(TargetHttpsProxiesTransport): + """REST backend transport for TargetHttpsProxies. + + The TargetHttpsProxies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListTargetHttpsProxiesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpsProxyAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListTargetHttpsProxiesRequest): + The request object. A request message for + TargetHttpsProxies.AggregatedList. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TargetHttpsProxyAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/targetHttpsProxies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListTargetHttpsProxiesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListTargetHttpsProxiesRequest.to_json( + compute.AggregatedListTargetHttpsProxiesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpsProxyAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.Delete. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.DeleteTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteTargetHttpsProxyRequest.to_json( + compute.DeleteTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpsProxy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetHttpsProxy: + Represents a Target HTTPS Proxy resource. Google Compute + Engine has two Target HTTPS Proxy resources: \* + `Global `__ + \* + `Regional `__ + A target HTTPS proxy is a component of GCP HTTPS load + balancers. \* targetHttpsProxies are used by external + HTTPS load balancers. \* regionTargetHttpsProxies are + used by internal HTTPS load balancers. Forwarding rules + reference a target HTTPS proxy, and the target proxy + then references a URL map. For more information, read + Using Target Proxies and Forwarding rule concepts. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.GetTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetTargetHttpsProxyRequest.to_json( + compute.GetTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpsProxy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.Insert. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetHttpsProxies', + 'body': 'target_https_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetHttpsProxy.to_json( + compute.TargetHttpsProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertTargetHttpsProxyRequest.to_json( + compute.InsertTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListTargetHttpsProxiesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetHttpsProxyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListTargetHttpsProxiesRequest): + The request object. A request message for + TargetHttpsProxies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetHttpsProxyList: + Contains a list of TargetHttpsProxy + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetHttpsProxies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListTargetHttpsProxiesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListTargetHttpsProxiesRequest.to_json( + compute.ListTargetHttpsProxiesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetHttpsProxyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.Patch. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}', + 'body': 'target_https_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.PatchTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetHttpsProxy.to_json( + compute.TargetHttpsProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchTargetHttpsProxyRequest.to_json( + compute.PatchTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_quic_override(self, + request: compute.SetQuicOverrideTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set quic override method over HTTP. + + Args: + request (~.compute.SetQuicOverrideTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.SetQuicOverride. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}/setQuicOverride', + 'body': 'target_https_proxies_set_quic_override_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.SetQuicOverrideTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetHttpsProxiesSetQuicOverrideRequest.to_json( + compute.TargetHttpsProxiesSetQuicOverrideRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetQuicOverrideTargetHttpsProxyRequest.to_json( + compute.SetQuicOverrideTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_ssl_certificates(self, + request: compute.SetSslCertificatesTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set ssl certificates method over HTTP. + + Args: + request (~.compute.SetSslCertificatesTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.SetSslCertificates. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/targetHttpsProxies/{target_https_proxy}/setSslCertificates', + 'body': 'target_https_proxies_set_ssl_certificates_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.SetSslCertificatesTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetHttpsProxiesSetSslCertificatesRequest.to_json( + compute.TargetHttpsProxiesSetSslCertificatesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetSslCertificatesTargetHttpsProxyRequest.to_json( + compute.SetSslCertificatesTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_ssl_policy(self, + request: compute.SetSslPolicyTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set ssl policy method over HTTP. + + Args: + request (~.compute.SetSslPolicyTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.SetSslPolicy. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}/setSslPolicy', + 'body': 'ssl_policy_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.SetSslPolicyTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.SslPolicyReference.to_json( + compute.SslPolicyReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetSslPolicyTargetHttpsProxyRequest.to_json( + compute.SetSslPolicyTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_url_map(self, + request: compute.SetUrlMapTargetHttpsProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set url map method over HTTP. + + Args: + request (~.compute.SetUrlMapTargetHttpsProxyRequest): + The request object. A request message for + TargetHttpsProxies.SetUrlMap. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/targetHttpsProxies/{target_https_proxy}/setUrlMap', + 'body': 'url_map_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_https_proxy", + "targetHttpsProxy" + ), + ] + + request_kwargs = compute.SetUrlMapTargetHttpsProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMapReference.to_json( + compute.UrlMapReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetUrlMapTargetHttpsProxyRequest.to_json( + compute.SetUrlMapTargetHttpsProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListTargetHttpsProxiesRequest], + compute.TargetHttpsProxyAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteTargetHttpsProxyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetTargetHttpsProxyRequest], + compute.TargetHttpsProxy]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertTargetHttpsProxyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListTargetHttpsProxiesRequest], + compute.TargetHttpsProxyList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchTargetHttpsProxyRequest], + compute.Operation]: + return self._patch + @ property + def set_quic_override(self) -> Callable[ + [compute.SetQuicOverrideTargetHttpsProxyRequest], + compute.Operation]: + return self._set_quic_override + @ property + def 
set_ssl_certificates(self) -> Callable[ + [compute.SetSslCertificatesTargetHttpsProxyRequest], + compute.Operation]: + return self._set_ssl_certificates + @ property + def set_ssl_policy(self) -> Callable[ + [compute.SetSslPolicyTargetHttpsProxyRequest], + compute.Operation]: + return self._set_ssl_policy + @ property + def set_url_map(self) -> Callable[ + [compute.SetUrlMapTargetHttpsProxyRequest], + compute.Operation]: + return self._set_url_map + def close(self): + self._session.close() + + +__all__=( + 'TargetHttpsProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/__init__.py new file mode 100644 index 000000000..845864ebc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import TargetInstancesClient + +__all__ = ( + 'TargetInstancesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/client.py new file mode 100644 index 000000000..5d217779e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/client.py @@ -0,0 +1,811 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_instances import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetInstancesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetInstancesRestTransport + + +class TargetInstancesClientMeta(type): + """Metaclass for the TargetInstances client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetInstancesTransport]] + _transport_registry["rest"] = TargetInstancesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetInstancesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TargetInstancesClient(metaclass=TargetInstancesClientMeta): + """The TargetInstances API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetInstancesClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetInstancesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TargetInstancesTransport: + """Returns the transport used by the client instance. + + Returns: + TargetInstancesTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified 
organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TargetInstancesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the target instances client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TargetInstancesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetInstancesTransport): + # transport is a TargetInstancesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListTargetInstancesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of target instances. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest, dict]): + The request object. A request message for + TargetInstances.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_instances.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListTargetInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListTargetInstancesRequest): + request = compute.AggregatedListTargetInstancesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteTargetInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + target_instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetInstance resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetInstanceRequest, dict]): + The request object. A request message for + TargetInstances.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone scoping this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_instance (str): + Name of the TargetInstance resource + to delete. + + This corresponds to the ``target_instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, target_instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteTargetInstanceRequest): + request = compute.DeleteTargetInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if target_instance is not None: + request.target_instance = target_instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetTargetInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + target_instance: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetInstance: + r"""Returns the specified TargetInstance resource. Gets a + list of available target instances by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetInstanceRequest, dict]): + The request object. A request message for + TargetInstances.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ zone (str): + Name of the zone scoping this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_instance (str): + Name of the TargetInstance resource + to return. + + This corresponds to the ``target_instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetInstance: + Represents a Target Instance + resource. You can use a target instance + to handle traffic for one or more + forwarding rules, which is ideal for + forwarding protocol traffic that is + managed by a single source. For example, + ESP, AH, TCP, or UDP. For more + information, read Target instances. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, target_instance]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetTargetInstanceRequest): + request = compute.GetTargetInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if target_instance is not None: + request.target_instance = target_instance + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertTargetInstanceRequest, dict] = None, + *, + project: str = None, + zone: str = None, + target_instance_resource: compute.TargetInstance = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetInstance resource in the specified + project and zone using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetInstanceRequest, dict]): + The request object. A request message for + TargetInstances.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone scoping this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_instance_resource (google.cloud.compute_v1.types.TargetInstance): + The body resource for this request + This corresponds to the ``target_instance_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, target_instance_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertTargetInstanceRequest): + request = compute.InsertTargetInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if target_instance_resource is not None: + request.target_instance_resource = target_instance_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetInstancesRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of TargetInstance resources + available to the specified project and zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetInstancesRequest, dict]): + The request object. A request message for + TargetInstances.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone scoping this + request. + + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_instances.pagers.ListPager: + Contains a list of TargetInstance + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetInstancesRequest): + request = compute.ListTargetInstancesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetInstancesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/pagers.py new file mode 100644 index 000000000..e25a7ed1d --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.TargetInstanceAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetInstanceAggregatedList], + request: compute.AggregatedListTargetInstancesRequest, + response: compute.TargetInstanceAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetInstanceAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListTargetInstancesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetInstanceAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.TargetInstancesScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.TargetInstancesScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetInstanceList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.TargetInstanceList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetInstanceList], + request: compute.ListTargetInstancesRequest, + response: compute.TargetInstanceList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListTargetInstancesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetInstanceList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListTargetInstancesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetInstanceList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetInstance]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/__init__.py new file mode 100644 index 000000000..e6bcc6aeb --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TargetInstancesTransport +from .rest import TargetInstancesRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[TargetInstancesTransport]] +_transport_registry['rest'] = TargetInstancesRestTransport + +__all__ = ( + 'TargetInstancesTransport', + 'TargetInstancesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/base.py new file mode 100644 index 000000000..9d755644c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TargetInstancesTransport(abc.ABC): + """Abstract transport class for TargetInstances.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListTargetInstancesRequest], + Union[ + compute.TargetInstanceAggregatedList, + Awaitable[compute.TargetInstanceAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteTargetInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetTargetInstanceRequest], + Union[ + compute.TargetInstance, + Awaitable[compute.TargetInstance] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertTargetInstanceRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListTargetInstancesRequest], + Union[ + compute.TargetInstanceList, + Awaitable[compute.TargetInstanceList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'TargetInstancesTransport', +) diff --git 
a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/rest.py new file mode 100644 index 000000000..63ce461f0 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_instances/transports/rest.py @@ -0,0 +1,649 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import TargetInstancesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class TargetInstancesRestTransport(TargetInstancesTransport): + """REST backend transport for TargetInstances. + + The TargetInstances API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListTargetInstancesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetInstanceAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListTargetInstancesRequest): + The request object. A request message for + TargetInstances.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TargetInstanceAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/targetInstances', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListTargetInstancesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListTargetInstancesRequest.to_json( + compute.AggregatedListTargetInstancesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetInstanceAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteTargetInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteTargetInstanceRequest): + The request object. A request message for + TargetInstances.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/targetInstances/{target_instance}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_instance", + "targetInstance" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteTargetInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteTargetInstanceRequest.to_json( + compute.DeleteTargetInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetTargetInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetInstance: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetTargetInstanceRequest): + The request object. A request message for + TargetInstances.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetInstance: + Represents a Target Instance + resource. You can use a target instance + to handle traffic for one or more + forwarding rules, which is ideal for + forwarding protocol traffic that is + managed by a single source. For example, + ESP, AH, TCP, or UDP. For more + information, read Target instances. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/targetInstances/{target_instance}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_instance", + "targetInstance" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetTargetInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetTargetInstanceRequest.to_json( + compute.GetTargetInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetInstance.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertTargetInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertTargetInstanceRequest): + The request object. A request message for + TargetInstances.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/targetInstances', + 'body': 'target_instance_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.InsertTargetInstanceRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetInstance.to_json( + compute.TargetInstance( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertTargetInstanceRequest.to_json( + compute.InsertTargetInstanceRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListTargetInstancesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetInstanceList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListTargetInstancesRequest): + The request object. A request message for + TargetInstances.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetInstanceList: + Contains a list of TargetInstance + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/targetInstances', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListTargetInstancesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListTargetInstancesRequest.to_json( + compute.ListTargetInstancesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetInstanceList.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListTargetInstancesRequest], + compute.TargetInstanceAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteTargetInstanceRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetTargetInstanceRequest], + compute.TargetInstance]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertTargetInstanceRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListTargetInstancesRequest], + compute.TargetInstanceList]: + return self._list + def close(self): + self._session.close() + + +__all__=( + 'TargetInstancesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/__init__.py new file mode 100644 index 000000000..9c9bc8b00 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import TargetPoolsClient + +__all__ = ( + 'TargetPoolsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/client.py new file mode 100644 index 000000000..1a773c4c3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/client.py @@ -0,0 +1,1436 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_pools import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetPoolsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetPoolsRestTransport + + +class TargetPoolsClientMeta(type): + """Metaclass for the TargetPools client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetPoolsTransport]] + _transport_registry["rest"] = TargetPoolsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetPoolsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TargetPoolsClient(metaclass=TargetPoolsClientMeta): + """The TargetPools API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetPoolsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetPoolsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TargetPoolsTransport: + """Returns the transport used by the client instance. + + Returns: + TargetPoolsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TargetPoolsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the target pools client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TargetPoolsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetPoolsTransport): + # transport is a TargetPoolsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def add_health_check(self, + request: Union[compute.AddHealthCheckTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + target_pools_add_health_check_request_resource: compute.TargetPoolsAddHealthCheckRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds health check URLs to a target pool. + + Args: + request (Union[google.cloud.compute_v1.types.AddHealthCheckTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.AddHealthCheck. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the target pool to add a + health check to. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_pools_add_health_check_request_resource (google.cloud.compute_v1.types.TargetPoolsAddHealthCheckRequest): + The body resource for this request + This corresponds to the ``target_pools_add_health_check_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_pool, target_pools_add_health_check_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddHealthCheckTargetPoolRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddHealthCheckTargetPoolRequest): + request = compute.AddHealthCheckTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + if target_pools_add_health_check_request_resource is not None: + request.target_pools_add_health_check_request_resource = target_pools_add_health_check_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_health_check] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def add_instance(self, + request: Union[compute.AddInstanceTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + target_pools_add_instance_request_resource: compute.TargetPoolsAddInstanceRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Adds an instance to a target pool. + + Args: + request (Union[google.cloud.compute_v1.types.AddInstanceTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.AddInstance. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the TargetPool resource to + add instances to. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pools_add_instance_request_resource (google.cloud.compute_v1.types.TargetPoolsAddInstanceRequest): + The body resource for this request + This corresponds to the ``target_pools_add_instance_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, target_pool, target_pools_add_instance_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AddInstanceTargetPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AddInstanceTargetPoolRequest): + request = compute.AddInstanceTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + if target_pools_add_instance_request_resource is not None: + request.target_pools_add_instance_request_resource = target_pools_add_instance_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.add_instance] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def aggregated_list(self, + request: Union[compute.AggregatedListTargetPoolsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of target pools. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListTargetPoolsRequest, dict]): + The request object. A request message for + TargetPools.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_pools.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListTargetPoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListTargetPoolsRequest): + request = compute.AggregatedListTargetPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified target pool. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the TargetPool resource to + delete. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_pool]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteTargetPoolRequest): + request = compute.DeleteTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetPool: + r"""Returns the specified target pool. Gets a list of + available target pools by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.Get. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the TargetPool resource to + return. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetPool: + Represents a Target Pool resource. + Target pools are used for network + TCP/UDP load balancing. A target pool + references member instances, an + associated legacy HttpHealthCheck + resource, and, optionally, a backup + target pool. For more information, read + Using target pools. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, target_pool]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetTargetPoolRequest): + request = compute.GetTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_health(self, + request: Union[compute.GetHealthTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + instance_reference_resource: compute.InstanceReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetPoolInstanceHealth: + r"""Gets the most recent health check results for each IP + for the instance that is referenced by the given target + pool. + + Args: + request (Union[google.cloud.compute_v1.types.GetHealthTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.GetHealth. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the TargetPool resource to + which the queried instance belongs. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_reference_resource (google.cloud.compute_v1.types.InstanceReference): + The body resource for this request + This corresponds to the ``instance_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetPoolInstanceHealth: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_pool, instance_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetHealthTargetPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.GetHealthTargetPoolRequest): + request = compute.GetHealthTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + if instance_reference_resource is not None: + request.instance_reference_resource = instance_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_health] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool_resource: compute.TargetPool = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a target pool in the specified project and + region using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_pool_resource (google.cloud.compute_v1.types.TargetPool): + The body resource for this request + This corresponds to the ``target_pool_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_pool_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.InsertTargetPoolRequest): + request = compute.InsertTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool_resource is not None: + request.target_pool_resource = target_pool_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetPoolsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of target pools available to the + specified project and region. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetPoolsRequest, dict]): + The request object. A request message for + TargetPools.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.target_pools.pagers.ListPager: + Contains a list of TargetPool + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetPoolsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetPoolsRequest): + request = compute.ListTargetPoolsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def remove_health_check(self, + request: Union[compute.RemoveHealthCheckTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + target_pools_remove_health_check_request_resource: compute.TargetPoolsRemoveHealthCheckRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Removes health check URL from a target pool. + + Args: + request (Union[google.cloud.compute_v1.types.RemoveHealthCheckTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.RemoveHealthCheck. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the target pool to remove + health checks from. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pools_remove_health_check_request_resource (google.cloud.compute_v1.types.TargetPoolsRemoveHealthCheckRequest): + The body resource for this request + This corresponds to the ``target_pools_remove_health_check_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_pool, target_pools_remove_health_check_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemoveHealthCheckTargetPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemoveHealthCheckTargetPoolRequest): + request = compute.RemoveHealthCheckTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + if target_pools_remove_health_check_request_resource is not None: + request.target_pools_remove_health_check_request_resource = target_pools_remove_health_check_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.remove_health_check] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def remove_instance(self, + request: Union[compute.RemoveInstanceTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + target_pools_remove_instance_request_resource: compute.TargetPoolsRemoveInstanceRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Removes instance URL from a target pool. + + Args: + request (Union[google.cloud.compute_v1.types.RemoveInstanceTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.RemoveInstance. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the TargetPool resource to + remove instances from. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_pools_remove_instance_request_resource (google.cloud.compute_v1.types.TargetPoolsRemoveInstanceRequest): + The body resource for this request + This corresponds to the ``target_pools_remove_instance_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_pool, target_pools_remove_instance_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.RemoveInstanceTargetPoolRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.RemoveInstanceTargetPoolRequest): + request = compute.RemoveInstanceTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + if target_pools_remove_instance_request_resource is not None: + request.target_pools_remove_instance_request_resource = target_pools_remove_instance_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.remove_instance] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_backup(self, + request: Union[compute.SetBackupTargetPoolRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_pool: str = None, + target_reference_resource: compute.TargetReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes a backup target pool's configurations. + + Args: + request (Union[google.cloud.compute_v1.types.SetBackupTargetPoolRequest, dict]): + The request object. A request message for + TargetPools.SetBackup. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region scoping this + request. 
+ + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_pool (str): + Name of the TargetPool resource to + set a backup pool for. + + This corresponds to the ``target_pool`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_reference_resource (google.cloud.compute_v1.types.TargetReference): + The body resource for this request + This corresponds to the ``target_reference_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, target_pool, target_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetBackupTargetPoolRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetBackupTargetPoolRequest): + request = compute.SetBackupTargetPoolRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_pool is not None: + request.target_pool = target_pool + if target_reference_resource is not None: + request.target_reference_resource = target_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_backup] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetPoolsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/pagers.py new file mode 100644 index 000000000..bf8abe1b6 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetPoolAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.TargetPoolAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetPoolAggregatedList], + request: compute.AggregatedListTargetPoolsRequest, + response: compute.TargetPoolAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListTargetPoolsRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetPoolAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListTargetPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetPoolAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.TargetPoolsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.TargetPoolsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetPoolList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.TargetPoolList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetPoolList], + request: compute.ListTargetPoolsRequest, + response: compute.TargetPoolList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListTargetPoolsRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetPoolList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListTargetPoolsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetPoolList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetPool]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/__init__.py new file mode 100644 index 000000000..24998c9e3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TargetPoolsTransport +from .rest import TargetPoolsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[TargetPoolsTransport]] +_transport_registry['rest'] = TargetPoolsRestTransport + +__all__ = ( + 'TargetPoolsTransport', + 'TargetPoolsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/base.py new file mode 100644 index 000000000..a6c36a10e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/base.py @@ -0,0 +1,287 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TargetPoolsTransport(abc.ABC): + """Abstract transport class for TargetPools.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.add_health_check: gapic_v1.method.wrap_method( + self.add_health_check, + default_timeout=None, + client_info=client_info, + ), + self.add_instance: gapic_v1.method.wrap_method( + self.add_instance, + default_timeout=None, + client_info=client_info, + ), + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_health: gapic_v1.method.wrap_method( + self.get_health, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.remove_health_check: gapic_v1.method.wrap_method( + self.remove_health_check, + default_timeout=None, + client_info=client_info, + ), + self.remove_instance: gapic_v1.method.wrap_method( + self.remove_instance, + default_timeout=None, + client_info=client_info, + ), + self.set_backup: gapic_v1.method.wrap_method( + self.set_backup, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def add_health_check(self) -> Callable[ + [compute.AddHealthCheckTargetPoolRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def add_instance(self) -> Callable[ + [compute.AddInstanceTargetPoolRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListTargetPoolsRequest], + Union[ + compute.TargetPoolAggregatedList, + Awaitable[compute.TargetPoolAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteTargetPoolRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetTargetPoolRequest], + Union[ + compute.TargetPool, + Awaitable[compute.TargetPool] + ]]: + raise NotImplementedError() + + @property + def get_health(self) -> Callable[ + [compute.GetHealthTargetPoolRequest], + Union[ + compute.TargetPoolInstanceHealth, + Awaitable[compute.TargetPoolInstanceHealth] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertTargetPoolRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListTargetPoolsRequest], + Union[ + compute.TargetPoolList, + Awaitable[compute.TargetPoolList] + ]]: + raise NotImplementedError() + + @property + def remove_health_check(self) -> Callable[ + [compute.RemoveHealthCheckTargetPoolRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def remove_instance(self) -> Callable[ + [compute.RemoveInstanceTargetPoolRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + 
@property + def set_backup(self) -> Callable[ + [compute.SetBackupTargetPoolRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'TargetPoolsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/rest.py new file mode 100644 index 000000000..4016f9693 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_pools/transports/rest.py @@ -0,0 +1,1366 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import TargetPoolsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class TargetPoolsRestTransport(TargetPoolsTransport): + """REST backend transport for TargetPools. + + The TargetPools API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _add_health_check(self, + request: compute.AddHealthCheckTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add health check method over HTTP. + + Args: + request (~.compute.AddHealthCheckTargetPoolRequest): + The request object. A request message for + TargetPools.AddHealthCheck. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/addHealthCheck', + 'body': 'target_pools_add_health_check_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.AddHealthCheckTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetPoolsAddHealthCheckRequest.to_json( + compute.TargetPoolsAddHealthCheckRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddHealthCheckTargetPoolRequest.to_json( + compute.AddHealthCheckTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. 
+ # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _add_instance(self, + request: compute.AddInstanceTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the add instance method over HTTP. + + Args: + request (~.compute.AddInstanceTargetPoolRequest): + The request object. A request message for + TargetPools.AddInstance. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. 
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/addInstance', + 'body': 'target_pools_add_instance_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.AddInstanceTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetPoolsAddInstanceRequest.to_json( + compute.TargetPoolsAddInstanceRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AddInstanceTargetPoolRequest.to_json( + compute.AddInstanceTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _aggregated_list(self, + request: compute.AggregatedListTargetPoolsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetPoolAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListTargetPoolsRequest): + The request object. A request message for + TargetPools.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.TargetPoolAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/targetPools', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListTargetPoolsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListTargetPoolsRequest.to_json( + compute.AggregatedListTargetPoolsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetPoolAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteTargetPoolRequest): + The request object. A request message for + TargetPools.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.DeleteTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteTargetPoolRequest.to_json( + compute.DeleteTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetPool: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetTargetPoolRequest): + The request object. A request message for + TargetPools.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetPool: + Represents a Target Pool resource. + Target pools are used for network + TCP/UDP load balancing. A target pool + references member instances, an + associated legacy HttpHealthCheck + resource, and, optionally, a backup + target pool. For more information, read + Using target pools. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.GetTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetTargetPoolRequest.to_json( + compute.GetTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetPool.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_health(self, + request: compute.GetHealthTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetPoolInstanceHealth: + r"""Call the get health method over HTTP. + + Args: + request (~.compute.GetHealthTargetPoolRequest): + The request object. A request message for + TargetPools.GetHealth. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetPoolInstanceHealth: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/getHealth', + 'body': 'instance_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.GetHealthTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.InstanceReference.to_json( + compute.InstanceReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetHealthTargetPoolRequest.to_json( + compute.GetHealthTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + 
use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetPoolInstanceHealth.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertTargetPoolRequest): + The request object. A request message for + TargetPools.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools', + 'body': 'target_pool_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetPool.to_json( + compute.TargetPool( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertTargetPoolRequest.to_json( + compute.InsertTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListTargetPoolsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetPoolList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListTargetPoolsRequest): + The request object. A request message for + TargetPools.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetPoolList: + Contains a list of TargetPool + resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListTargetPoolsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListTargetPoolsRequest.to_json( + compute.ListTargetPoolsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetPoolList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_health_check(self, + request: compute.RemoveHealthCheckTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove health check method over HTTP. + + Args: + request (~.compute.RemoveHealthCheckTargetPoolRequest): + The request object. A request message for + TargetPools.RemoveHealthCheck. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/removeHealthCheck', + 'body': 'target_pools_remove_health_check_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.RemoveHealthCheckTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetPoolsRemoveHealthCheckRequest.to_json( + compute.TargetPoolsRemoveHealthCheckRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveHealthCheckTargetPoolRequest.to_json( + compute.RemoveHealthCheckTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _remove_instance(self, + request: compute.RemoveInstanceTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the remove instance method over HTTP. + + Args: + request (~.compute.RemoveInstanceTargetPoolRequest): + The request object. A request message for + TargetPools.RemoveInstance. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/removeInstance', + 'body': 'target_pools_remove_instance_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.RemoveInstanceTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetPoolsRemoveInstanceRequest.to_json( + compute.TargetPoolsRemoveInstanceRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.RemoveInstanceTargetPoolRequest.to_json( + compute.RemoveInstanceTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_backup(self, + request: compute.SetBackupTargetPoolRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set backup method over HTTP. + + Args: + request (~.compute.SetBackupTargetPoolRequest): + The request object. A request message for + TargetPools.SetBackup. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/setBackup', + 'body': 'target_reference_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "target_pool", + "targetPool" + ), + ] + + request_kwargs = compute.SetBackupTargetPoolRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetReference.to_json( + compute.TargetReference( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetBackupTargetPoolRequest.to_json( + compute.SetBackupTargetPoolRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def add_health_check(self) -> Callable[ + [compute.AddHealthCheckTargetPoolRequest], + compute.Operation]: + return self._add_health_check + @ property + def add_instance(self) -> Callable[ + [compute.AddInstanceTargetPoolRequest], + compute.Operation]: + return self._add_instance + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListTargetPoolsRequest], + compute.TargetPoolAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteTargetPoolRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetTargetPoolRequest], + compute.TargetPool]: + return self._get + @ property + def get_health(self) -> Callable[ + [compute.GetHealthTargetPoolRequest], + compute.TargetPoolInstanceHealth]: + return self._get_health + @ property + def insert(self) -> Callable[ + [compute.InsertTargetPoolRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + 
[compute.ListTargetPoolsRequest], + compute.TargetPoolList]: + return self._list + @ property + def remove_health_check(self) -> Callable[ + [compute.RemoveHealthCheckTargetPoolRequest], + compute.Operation]: + return self._remove_health_check + @ property + def remove_instance(self) -> Callable[ + [compute.RemoveInstanceTargetPoolRequest], + compute.Operation]: + return self._remove_instance + @ property + def set_backup(self) -> Callable[ + [compute.SetBackupTargetPoolRequest], + compute.Operation]: + return self._set_backup + def close(self): + self._session.close() + + +__all__=( + 'TargetPoolsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/__init__.py new file mode 100644 index 000000000..5f21564c2 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import TargetSslProxiesClient + +__all__ = ( + 'TargetSslProxiesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/client.py new file mode 100644 index 000000000..b6b0b553f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/client.py @@ -0,0 +1,1091 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_ssl_proxies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetSslProxiesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetSslProxiesRestTransport + + +class TargetSslProxiesClientMeta(type): + """Metaclass for the TargetSslProxies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetSslProxiesTransport]] + _transport_registry["rest"] = TargetSslProxiesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetSslProxiesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TargetSslProxiesClient(metaclass=TargetSslProxiesClientMeta): + """The TargetSslProxies API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetSslProxiesClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetSslProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TargetSslProxiesTransport: + """Returns the transport used by the client instance. + + Returns: + TargetSslProxiesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P<folder>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P<organization>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TargetSslProxiesTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the target ssl proxies client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TargetSslProxiesTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present.
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetSslProxiesTransport): + # transport is a TargetSslProxiesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteTargetSslProxyRequest, dict] = None, + *, + project: str = None, + target_ssl_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetSslProxy resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetSslProxyRequest, dict]): + The request object. A request message for + TargetSslProxies.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_ssl_proxy (str): + Name of the TargetSslProxy resource + to delete. + + This corresponds to the ``target_ssl_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_ssl_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetSslProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteTargetSslProxyRequest): + request = compute.DeleteTargetSslProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if target_ssl_proxy is not None: + request.target_ssl_proxy = target_ssl_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetTargetSslProxyRequest, dict] = None, + *, + project: str = None, + target_ssl_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetSslProxy: + r"""Returns the specified TargetSslProxy resource. Gets a + list of available target SSL proxies by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetSslProxyRequest, dict]): + The request object. A request message for + TargetSslProxies.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_ssl_proxy (str): + Name of the TargetSslProxy resource + to return. + + This corresponds to the ``target_ssl_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetSslProxy: + Represents a Target SSL Proxy + resource. A target SSL proxy is a + component of a SSL Proxy load balancer. 
+ Global forwarding rules reference a + target SSL proxy, and the target proxy + then references an external backend + service. For more information, read + Using Target Proxies. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_ssl_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetSslProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetTargetSslProxyRequest): + request = compute.GetTargetSslProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_ssl_proxy is not None: + request.target_ssl_proxy = target_ssl_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertTargetSslProxyRequest, dict] = None, + *, + project: str = None, + target_ssl_proxy_resource: compute.TargetSslProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetSslProxy resource in the specified + project using the data included in the request. 
+ + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetSslProxyRequest, dict]): + The request object. A request message for + TargetSslProxies.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_ssl_proxy_resource (google.cloud.compute_v1.types.TargetSslProxy): + The body resource for this request + This corresponds to the ``target_ssl_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, target_ssl_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetSslProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertTargetSslProxyRequest): + request = compute.InsertTargetSslProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_ssl_proxy_resource is not None: + request.target_ssl_proxy_resource = target_ssl_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetSslProxiesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of TargetSslProxy resources + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetSslProxiesRequest, dict]): + The request object. A request message for + TargetSslProxies.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_ssl_proxies.pagers.ListPager: + Contains a list of TargetSslProxy + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetSslProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetSslProxiesRequest): + request = compute.ListTargetSslProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_backend_service(self, + request: Union[compute.SetBackendServiceTargetSslProxyRequest, dict] = None, + *, + project: str = None, + target_ssl_proxy: str = None, + target_ssl_proxies_set_backend_service_request_resource: compute.TargetSslProxiesSetBackendServiceRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the BackendService for TargetSslProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetBackendServiceTargetSslProxyRequest, dict]): + The request object. A request message for + TargetSslProxies.SetBackendService. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_ssl_proxy (str): + Name of the TargetSslProxy resource + whose BackendService resource is to be + set. + + This corresponds to the ``target_ssl_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_ssl_proxies_set_backend_service_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetBackendServiceRequest): + The body resource for this request + This corresponds to the ``target_ssl_proxies_set_backend_service_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_ssl_proxy, target_ssl_proxies_set_backend_service_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetBackendServiceTargetSslProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetBackendServiceTargetSslProxyRequest): + request = compute.SetBackendServiceTargetSslProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if target_ssl_proxy is not None: + request.target_ssl_proxy = target_ssl_proxy + if target_ssl_proxies_set_backend_service_request_resource is not None: + request.target_ssl_proxies_set_backend_service_request_resource = target_ssl_proxies_set_backend_service_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_backend_service] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_proxy_header(self, + request: Union[compute.SetProxyHeaderTargetSslProxyRequest, dict] = None, + *, + project: str = None, + target_ssl_proxy: str = None, + target_ssl_proxies_set_proxy_header_request_resource: compute.TargetSslProxiesSetProxyHeaderRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the ProxyHeaderType for TargetSslProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetProxyHeaderTargetSslProxyRequest, dict]): + The request object. A request message for + TargetSslProxies.SetProxyHeader. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_ssl_proxy (str): + Name of the TargetSslProxy resource + whose ProxyHeader is to be set. + + This corresponds to the ``target_ssl_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_ssl_proxies_set_proxy_header_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetProxyHeaderRequest): + The body resource for this request + This corresponds to the ``target_ssl_proxies_set_proxy_header_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_ssl_proxy, target_ssl_proxies_set_proxy_header_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetProxyHeaderTargetSslProxyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetProxyHeaderTargetSslProxyRequest): + request = compute.SetProxyHeaderTargetSslProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_ssl_proxy is not None: + request.target_ssl_proxy = target_ssl_proxy + if target_ssl_proxies_set_proxy_header_request_resource is not None: + request.target_ssl_proxies_set_proxy_header_request_resource = target_ssl_proxies_set_proxy_header_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_proxy_header] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_ssl_certificates(self, + request: Union[compute.SetSslCertificatesTargetSslProxyRequest, dict] = None, + *, + project: str = None, + target_ssl_proxy: str = None, + target_ssl_proxies_set_ssl_certificates_request_resource: compute.TargetSslProxiesSetSslCertificatesRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes SslCertificates for TargetSslProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetSslCertificatesTargetSslProxyRequest, dict]): + The request object. A request message for + TargetSslProxies.SetSslCertificates. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_ssl_proxy (str): + Name of the TargetSslProxy resource + whose SslCertificate resource is to be + set. + + This corresponds to the ``target_ssl_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_ssl_proxies_set_ssl_certificates_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetSslCertificatesRequest): + The body resource for this request + This corresponds to the ``target_ssl_proxies_set_ssl_certificates_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, target_ssl_proxy, target_ssl_proxies_set_ssl_certificates_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetSslCertificatesTargetSslProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetSslCertificatesTargetSslProxyRequest): + request = compute.SetSslCertificatesTargetSslProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_ssl_proxy is not None: + request.target_ssl_proxy = target_ssl_proxy + if target_ssl_proxies_set_ssl_certificates_request_resource is not None: + request.target_ssl_proxies_set_ssl_certificates_request_resource = target_ssl_proxies_set_ssl_certificates_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_ssl_certificates] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_ssl_policy(self, + request: Union[compute.SetSslPolicyTargetSslProxyRequest, dict] = None, + *, + project: str = None, + target_ssl_proxy: str = None, + ssl_policy_reference_resource: compute.SslPolicyReference = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the SSL policy for TargetSslProxy. The SSL + policy specifies the server-side support for SSL + features. 
This affects connections between clients and
+        the SSL proxy load balancer. It does not affect the
+        connection between the load balancer and the backends.
- For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_ssl_proxy, ssl_policy_reference_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetSslPolicyTargetSslProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetSslPolicyTargetSslProxyRequest): + request = compute.SetSslPolicyTargetSslProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_ssl_proxy is not None: + request.target_ssl_proxy = target_ssl_proxy + if ssl_policy_reference_resource is not None: + request.ssl_policy_reference_resource = ssl_policy_reference_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_ssl_policy] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetSslProxiesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/pagers.py new file mode 100644 index 000000000..e3a55ba99 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetSslProxyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.TargetSslProxyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetSslProxyList], + request: compute.ListTargetSslProxiesRequest, + response: compute.TargetSslProxyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListTargetSslProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetSslProxyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListTargetSslProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetSslProxyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetSslProxy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/__init__.py new file mode 100644 index 000000000..af17c543f --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/__init__.py @@ -0,0 +1,30 
@@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TargetSslProxiesTransport +from .rest import TargetSslProxiesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[TargetSslProxiesTransport]] +_transport_registry['rest'] = TargetSslProxiesRestTransport + +__all__ = ( + 'TargetSslProxiesTransport', + 'TargetSslProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/base.py new file mode 100644 index 000000000..777ae41fc --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/base.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TargetSslProxiesTransport(abc.ABC): + """Abstract transport class for TargetSslProxies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.set_backend_service: gapic_v1.method.wrap_method( + self.set_backend_service, + default_timeout=None, + client_info=client_info, + ), + self.set_proxy_header: gapic_v1.method.wrap_method( + self.set_proxy_header, + default_timeout=None, + client_info=client_info, + ), + self.set_ssl_certificates: gapic_v1.method.wrap_method( + self.set_ssl_certificates, + default_timeout=None, + client_info=client_info, + ), + self.set_ssl_policy: gapic_v1.method.wrap_method( + self.set_ssl_policy, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteTargetSslProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetTargetSslProxyRequest], + Union[ + compute.TargetSslProxy, + Awaitable[compute.TargetSslProxy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertTargetSslProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListTargetSslProxiesRequest], + Union[ + compute.TargetSslProxyList, + Awaitable[compute.TargetSslProxyList] + ]]: + raise NotImplementedError() + + @property + def set_backend_service(self) -> Callable[ + [compute.SetBackendServiceTargetSslProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_proxy_header(self) -> Callable[ + [compute.SetProxyHeaderTargetSslProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_ssl_certificates(self) -> Callable[ + [compute.SetSslCertificatesTargetSslProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def set_ssl_policy(self) -> Callable[ + [compute.SetSslPolicyTargetSslProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'TargetSslProxiesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/rest.py new file mode 100644 index 000000000..4fb82327a --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_ssl_proxies/transports/rest.py @@ -0,0 +1,1015 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import TargetSslProxiesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class TargetSslProxiesRestTransport(TargetSslProxiesTransport): + """REST backend transport for TargetSslProxies. 
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            client_cert_source_for_mtls: Optional[Callable[[
                ], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint. Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor.
        # NOTE(review): ``credentials_file``, ``scopes`` and
        # ``quota_project_id`` are accepted but not forwarded to the base
        # constructor here — tracked by the TODOs below; confirm upstream.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        # AuthorizedSession transparently attaches the credentials to each
        # outgoing HTTP request.
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)
- For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_ssl_proxy", + "targetSslProxy" + ), + ] + + request_kwargs = compute.DeleteTargetSslProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteTargetSslProxyRequest.to_json( + compute.DeleteTargetSslProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
    def _get(self,
            request: compute.GetTargetSslProxyRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.TargetSslProxy:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetTargetSslProxyRequest):
                The request object. A request message for
                TargetSslProxies.Get. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.TargetSslProxy:
                Represents a Target SSL Proxy
                resource. A target SSL proxy is a
                component of a SSL Proxy load balancer.
                Global forwarding rules reference a
                target SSL proxy, and the target proxy
                then references an external backend
                service. For more information, read
                Using Target Proxies.
        """
        # NOTE(review): ``retry`` is accepted for interface parity but is not
        # applied inside this method — confirm intended upstream.

        # Static HTTP binding for this RPC: verb plus URI template.
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}',
            },
        ]

        # Fields the API requires; they must survive query-param serialization.
        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "project",
                "project"
            ),
            (
                "target_ssl_proxy",
                "targetSslProxy"
            ),
        ]

        # Expand the URI template using values drawn from the request message.
        request_kwargs = compute.GetTargetSslProxyRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.GetTargetSslProxyRequest.to_json(
            compute.GetTargetSslProxyRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are not serialized).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response, tolerating fields introduced by newer servers.
        return compute.TargetSslProxy.from_json(
            response.content,
            ignore_unknown_fields=True
        )
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetSslProxies', + 'body': 'target_ssl_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertTargetSslProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetSslProxy.to_json( + compute.TargetSslProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertTargetSslProxyRequest.to_json( + compute.InsertTargetSslProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
    def _list(self,
            request: compute.ListTargetSslProxiesRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.TargetSslProxyList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListTargetSslProxiesRequest):
                The request object. A request message for
                TargetSslProxies.List. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.TargetSslProxyList:
                Contains a list of TargetSslProxy
                resources.
        """
        # NOTE(review): ``retry`` is accepted for interface parity but is not
        # applied inside this method — confirm intended upstream.

        # Static HTTP binding for this RPC: verb plus URI template.
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/global/targetSslProxies',
            },
        ]

        # Fields the API requires; they must survive query-param serialization.
        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "project",
                "project"
            ),
        ]

        # Expand the URI template using values drawn from the request message.
        request_kwargs = compute.ListTargetSslProxiesRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.ListTargetSslProxiesRequest.to_json(
            compute.ListTargetSslProxiesRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are not serialized).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response, tolerating fields introduced by newer servers.
        return compute.TargetSslProxyList.from_json(
            response.content,
            ignore_unknown_fields=True
        )
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setBackendService', + 'body': 'target_ssl_proxies_set_backend_service_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_ssl_proxy", + "targetSslProxy" + ), + ] + + request_kwargs = compute.SetBackendServiceTargetSslProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetSslProxiesSetBackendServiceRequest.to_json( + compute.TargetSslProxiesSetBackendServiceRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetBackendServiceTargetSslProxyRequest.to_json( + compute.SetBackendServiceTargetSslProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
    def _set_proxy_header(self,
            request: compute.SetProxyHeaderTargetSslProxyRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the set proxy header method over HTTP.

        Args:
            request (~.compute.SetProxyHeaderTargetSslProxyRequest):
                The request object. A request message for
                TargetSslProxies.SetProxyHeader. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute
                Engine has three Operation resources: Global, Regional
                and Zonal. You can use an operation resource to manage
                asynchronous API requests. For more information, read
                Handling API responses. Operations can be global,
                regional or zonal. For global operations, use the
                ``globalOperations`` resource; for regional operations,
                the ``regionOperations`` resource; for zonal
                operations, the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal
                Resources.
        """
        # NOTE(review): ``retry`` is accepted for interface parity but is not
        # applied inside this method — confirm intended upstream.

        # Static HTTP binding for this RPC: verb, URI template and which
        # request field supplies the HTTP body.
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setProxyHeader',
                'body': 'target_ssl_proxies_set_proxy_header_request_resource',
            },
        ]

        # Fields the API requires; they must survive query-param serialization.
        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "project",
                "project"
            ),
            (
                "target_ssl_proxy",
                "targetSslProxy"
            ),
        ]

        # Expand the URI template using values drawn from the request message.
        request_kwargs = compute.SetProxyHeaderTargetSslProxyRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body.
        body = compute.TargetSslProxiesSetProxyHeaderRequest.to_json(
            compute.TargetSslProxiesSetProxyHeaderRequest(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.SetProxyHeaderTargetSslProxyRequest.to_json(
            compute.SetProxyHeaderTargetSslProxyRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are not serialized).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response, tolerating fields introduced by newer servers.
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )
For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setSslCertificates', + 'body': 'target_ssl_proxies_set_ssl_certificates_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_ssl_proxy", + "targetSslProxy" + ), + ] + + request_kwargs = compute.SetSslCertificatesTargetSslProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetSslProxiesSetSslCertificatesRequest.to_json( + compute.TargetSslProxiesSetSslCertificatesRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetSslCertificatesTargetSslProxyRequest.to_json( + compute.SetSslCertificatesTargetSslProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
    def _set_ssl_policy(self,
            request: compute.SetSslPolicyTargetSslProxyRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        r"""Call the set ssl policy method over HTTP.

        Args:
            request (~.compute.SetSslPolicyTargetSslProxyRequest):
                The request object. A request message for
                TargetSslProxies.SetSslPolicy. See the
                method description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource. Google Compute
                Engine has three Operation resources: Global, Regional
                and Zonal. You can use an operation resource to manage
                asynchronous API requests. For more information, read
                Handling API responses. Operations can be global,
                regional or zonal. For global operations, use the
                ``globalOperations`` resource; for regional operations,
                the ``regionOperations`` resource; for zonal
                operations, the ``zonalOperations`` resource. For more
                information, read Global, Regional, and Zonal
                Resources.
        """
        # NOTE(review): ``retry`` is accepted for interface parity but is not
        # applied inside this method — confirm intended upstream.

        # Static HTTP binding for this RPC: verb, URI template and which
        # request field supplies the HTTP body.
        http_options = [
            {
                'method': 'post',
                'uri': '/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setSslPolicy',
                'body': 'ssl_policy_reference_resource',
            },
        ]

        # Fields the API requires; they must survive query-param serialization.
        required_fields = [
            # (snake_case_name, camel_case_name)
            (
                "project",
                "project"
            ),
            (
                "target_ssl_proxy",
                "targetSslProxy"
            ),
        ]

        # Expand the URI template using values drawn from the request message.
        request_kwargs = compute.SetSslPolicyTargetSslProxyRequest.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body.
        body = compute.SslPolicyReference.to_json(
            compute.SslPolicyReference(
                transcoded_request['body']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        )
        uri = transcoded_request['uri']
        method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(compute.SetSslPolicyTargetSslProxyRequest.to_json(
            compute.SetSslPolicyTargetSslProxyRequest(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above (defaults are not serialized).
        orig_query_params = transcoded_request["query_params"]
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params:
                if camel_case_name not in query_params:
                    query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response=getattr(self._session, method)(
            # Replace with proper schema configuration (http/https) logic
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
            data=body,
        )

        # In case of error, raise the appropriate core_exceptions.GoogleAPICallError
        # exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        # Return the response, tolerating fields introduced by newer servers.
        return compute.Operation.from_json(
            response.content,
            ignore_unknown_fields=True
        )
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Package initializer: re-export the service client so callers can import it
# from the ``target_tcp_proxies`` package root.
from .client import TargetTcpProxiesClient

__all__ = (
    'TargetTcpProxiesClient',
)
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_tcp_proxies import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetTcpProxiesTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetTcpProxiesRestTransport + + +class TargetTcpProxiesClientMeta(type): + """Metaclass for the TargetTcpProxies client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. 
+ """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetTcpProxiesTransport]] + _transport_registry["rest"] = TargetTcpProxiesRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetTcpProxiesTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class TargetTcpProxiesClient(metaclass=TargetTcpProxiesClientMeta): + """The TargetTcpProxies API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetTcpProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + TargetTcpProxiesClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> TargetTcpProxiesTransport: + """Returns the transport used by the client instance. + + Returns: + TargetTcpProxiesTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, TargetTcpProxiesTransport, None] = None,
+            client_options: Optional[client_options_lib.ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the target tcp proxies client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, TargetTcpProxiesTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetTcpProxiesTransport): + # transport is a TargetTcpProxiesTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteTargetTcpProxyRequest, dict] = None, + *, + project: str = None, + target_tcp_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified TargetTcpProxy resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetTcpProxyRequest, dict]): + The request object. A request message for + TargetTcpProxies.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_tcp_proxy (str): + Name of the TargetTcpProxy resource + to delete. + + This corresponds to the ``target_tcp_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_tcp_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetTcpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteTargetTcpProxyRequest): + request = compute.DeleteTargetTcpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if target_tcp_proxy is not None: + request.target_tcp_proxy = target_tcp_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetTargetTcpProxyRequest, dict] = None, + *, + project: str = None, + target_tcp_proxy: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetTcpProxy: + r"""Returns the specified TargetTcpProxy resource. Gets a + list of available target TCP proxies by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetTcpProxyRequest, dict]): + The request object. A request message for + TargetTcpProxies.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_tcp_proxy (str): + Name of the TargetTcpProxy resource + to return. + + This corresponds to the ``target_tcp_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetTcpProxy: + Represents a Target TCP Proxy + resource. A target TCP proxy is a + component of a TCP Proxy load balancer. 
+ Global forwarding rules reference target + TCP proxy, and the target proxy then + references an external backend service. + For more information, read TCP Proxy + Load Balancing overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_tcp_proxy]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetTcpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetTargetTcpProxyRequest): + request = compute.GetTargetTcpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_tcp_proxy is not None: + request.target_tcp_proxy = target_tcp_proxy + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertTargetTcpProxyRequest, dict] = None, + *, + project: str = None, + target_tcp_proxy_resource: compute.TargetTcpProxy = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a TargetTcpProxy resource in the specified + project using the data included in the request. 
+ + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetTcpProxyRequest, dict]): + The request object. A request message for + TargetTcpProxies.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_tcp_proxy_resource (google.cloud.compute_v1.types.TargetTcpProxy): + The body resource for this request + This corresponds to the ``target_tcp_proxy_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, target_tcp_proxy_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetTcpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertTargetTcpProxyRequest): + request = compute.InsertTargetTcpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_tcp_proxy_resource is not None: + request.target_tcp_proxy_resource = target_tcp_proxy_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetTcpProxiesRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of TargetTcpProxy resources + available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetTcpProxiesRequest, dict]): + The request object. A request message for + TargetTcpProxies.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_tcp_proxies.pagers.ListPager: + Contains a list of TargetTcpProxy + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetTcpProxiesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetTcpProxiesRequest): + request = compute.ListTargetTcpProxiesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def set_backend_service(self, + request: Union[compute.SetBackendServiceTargetTcpProxyRequest, dict] = None, + *, + project: str = None, + target_tcp_proxy: str = None, + target_tcp_proxies_set_backend_service_request_resource: compute.TargetTcpProxiesSetBackendServiceRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the BackendService for TargetTcpProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetBackendServiceTargetTcpProxyRequest, dict]): + The request object. A request message for + TargetTcpProxies.SetBackendService. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_tcp_proxy (str): + Name of the TargetTcpProxy resource + whose BackendService resource is to be + set. + + This corresponds to the ``target_tcp_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_tcp_proxies_set_backend_service_request_resource (google.cloud.compute_v1.types.TargetTcpProxiesSetBackendServiceRequest): + The body resource for this request + This corresponds to the ``target_tcp_proxies_set_backend_service_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_tcp_proxy, target_tcp_proxies_set_backend_service_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetBackendServiceTargetTcpProxyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetBackendServiceTargetTcpProxyRequest): + request = compute.SetBackendServiceTargetTcpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if target_tcp_proxy is not None: + request.target_tcp_proxy = target_tcp_proxy + if target_tcp_proxies_set_backend_service_request_resource is not None: + request.target_tcp_proxies_set_backend_service_request_resource = target_tcp_proxies_set_backend_service_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_backend_service] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_proxy_header(self, + request: Union[compute.SetProxyHeaderTargetTcpProxyRequest, dict] = None, + *, + project: str = None, + target_tcp_proxy: str = None, + target_tcp_proxies_set_proxy_header_request_resource: compute.TargetTcpProxiesSetProxyHeaderRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Changes the ProxyHeaderType for TargetTcpProxy. + + Args: + request (Union[google.cloud.compute_v1.types.SetProxyHeaderTargetTcpProxyRequest, dict]): + The request object. A request message for + TargetTcpProxies.SetProxyHeader. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_tcp_proxy (str): + Name of the TargetTcpProxy resource + whose ProxyHeader is to be set. + + This corresponds to the ``target_tcp_proxy`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ target_tcp_proxies_set_proxy_header_request_resource (google.cloud.compute_v1.types.TargetTcpProxiesSetProxyHeaderRequest): + The body resource for this request + This corresponds to the ``target_tcp_proxies_set_proxy_header_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, target_tcp_proxy, target_tcp_proxies_set_proxy_header_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetProxyHeaderTargetTcpProxyRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetProxyHeaderTargetTcpProxyRequest): + request = compute.SetProxyHeaderTargetTcpProxyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if target_tcp_proxy is not None: + request.target_tcp_proxy = target_tcp_proxy + if target_tcp_proxies_set_proxy_header_request_resource is not None: + request.target_tcp_proxies_set_proxy_header_request_resource = target_tcp_proxies_set_proxy_header_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_proxy_header] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetTcpProxiesClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/pagers.py new file mode 100644 index 000000000..20e6a9c63 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.TargetTcpProxyList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.TargetTcpProxyList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.TargetTcpProxyList], + request: compute.ListTargetTcpProxiesRequest, + response: compute.TargetTcpProxyList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListTargetTcpProxiesRequest): + The initial request object. + response (google.cloud.compute_v1.types.TargetTcpProxyList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListTargetTcpProxiesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.TargetTcpProxyList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.TargetTcpProxy]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/__init__.py new file mode 100644 index 000000000..5921f380e --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/__init__.py @@ -0,0 +1,30 
@@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TargetTcpProxiesTransport +from .rest import TargetTcpProxiesRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[TargetTcpProxiesTransport]] +_transport_registry['rest'] = TargetTcpProxiesRestTransport + +__all__ = ( + 'TargetTcpProxiesTransport', + 'TargetTcpProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/base.py new file mode 100644 index 000000000..1599b9e25 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/base.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class TargetTcpProxiesTransport(abc.ABC): + """Abstract transport class for TargetTcpProxies.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.set_backend_service: gapic_v1.method.wrap_method( + self.set_backend_service, + default_timeout=None, + client_info=client_info, + ), + self.set_proxy_header: gapic_v1.method.wrap_method( + self.set_proxy_header, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteTargetTcpProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetTargetTcpProxyRequest], + Union[ + compute.TargetTcpProxy, + Awaitable[compute.TargetTcpProxy] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertTargetTcpProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListTargetTcpProxiesRequest], + Union[ + compute.TargetTcpProxyList, + Awaitable[compute.TargetTcpProxyList] + ]]: + raise NotImplementedError() + + @property + def set_backend_service(self) -> Callable[ + [compute.SetBackendServiceTargetTcpProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def 
set_proxy_header(self) -> Callable[ + [compute.SetProxyHeaderTargetTcpProxyRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'TargetTcpProxiesTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/rest.py new file mode 100644 index 000000000..ca3ebb698 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_tcp_proxies/transports/rest.py @@ -0,0 +1,779 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import TargetTcpProxiesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class TargetTcpProxiesRestTransport(TargetTcpProxiesTransport): + """REST backend transport for TargetTcpProxies. + + The TargetTcpProxies API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. 
+ client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteTargetTcpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteTargetTcpProxyRequest): + The request object. A request message for + TargetTcpProxies.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_tcp_proxy", + "targetTcpProxy" + ), + ] + + request_kwargs = compute.DeleteTargetTcpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteTargetTcpProxyRequest.to_json( + compute.DeleteTargetTcpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetTargetTcpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetTcpProxy: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetTargetTcpProxyRequest): + The request object. A request message for + TargetTcpProxies.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetTcpProxy: + Represents a Target TCP Proxy + resource. A target TCP proxy is a + component of a TCP Proxy load balancer. + Global forwarding rules reference target + TCP proxy, and the target proxy then + references an external backend service. + For more information, read TCP Proxy + Load Balancing overview. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_tcp_proxy", + "targetTcpProxy" + ), + ] + + request_kwargs = compute.GetTargetTcpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetTargetTcpProxyRequest.to_json( + compute.GetTargetTcpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetTcpProxy.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertTargetTcpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertTargetTcpProxyRequest): + The request object. A request message for + TargetTcpProxies.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetTcpProxies', + 'body': 'target_tcp_proxy_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertTargetTcpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetTcpProxy.to_json( + compute.TargetTcpProxy( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertTargetTcpProxyRequest.to_json( + compute.InsertTargetTcpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListTargetTcpProxiesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TargetTcpProxyList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListTargetTcpProxiesRequest): + The request object. A request message for + TargetTcpProxies.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TargetTcpProxyList: + Contains a list of TargetTcpProxy + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/targetTcpProxies', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListTargetTcpProxiesRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListTargetTcpProxiesRequest.to_json( + compute.ListTargetTcpProxiesRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TargetTcpProxyList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_backend_service(self, + request: compute.SetBackendServiceTargetTcpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set backend service method over HTTP. + + Args: + request (~.compute.SetBackendServiceTargetTcpProxyRequest): + The request object. A request message for + TargetTcpProxies.SetBackendService. See + the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}/setBackendService', + 'body': 'target_tcp_proxies_set_backend_service_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_tcp_proxy", + "targetTcpProxy" + ), + ] + + request_kwargs = compute.SetBackendServiceTargetTcpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetTcpProxiesSetBackendServiceRequest.to_json( + compute.TargetTcpProxiesSetBackendServiceRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetBackendServiceTargetTcpProxyRequest.to_json( + compute.SetBackendServiceTargetTcpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_proxy_header(self, + request: compute.SetProxyHeaderTargetTcpProxyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set proxy header method over HTTP. + + Args: + request (~.compute.SetProxyHeaderTargetTcpProxyRequest): + The request object. A request message for + TargetTcpProxies.SetProxyHeader. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. 
Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}/setProxyHeader', + 'body': 'target_tcp_proxies_set_proxy_header_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "target_tcp_proxy", + "targetTcpProxy" + ), + ] + + request_kwargs = compute.SetProxyHeaderTargetTcpProxyRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TargetTcpProxiesSetProxyHeaderRequest.to_json( + compute.TargetTcpProxiesSetProxyHeaderRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetProxyHeaderTargetTcpProxyRequest.to_json( + compute.SetProxyHeaderTargetTcpProxyRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteTargetTcpProxyRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetTargetTcpProxyRequest], + compute.TargetTcpProxy]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertTargetTcpProxyRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListTargetTcpProxiesRequest], + compute.TargetTcpProxyList]: + return self._list + @ property + def set_backend_service(self) -> Callable[ + [compute.SetBackendServiceTargetTcpProxyRequest], + compute.Operation]: + return self._set_backend_service + @ property + def set_proxy_header(self) -> Callable[ + [compute.SetProxyHeaderTargetTcpProxyRequest], + compute.Operation]: + return self._set_proxy_header + def close(self): + self._session.close() + + +__all__=( + 'TargetTcpProxiesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/__init__.py 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/__init__.py new file mode 100644 index 000000000..eb446b0b7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import TargetVpnGatewaysClient + +__all__ = ( + 'TargetVpnGatewaysClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/client.py new file mode 100644 index 000000000..1cf22548b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/client.py @@ -0,0 +1,800 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.target_vpn_gateways import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import TargetVpnGatewaysTransport, DEFAULT_CLIENT_INFO +from .transports.rest import TargetVpnGatewaysRestTransport + + +class TargetVpnGatewaysClientMeta(type): + """Metaclass for the TargetVpnGateways client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[TargetVpnGatewaysTransport]] + _transport_registry["rest"] = TargetVpnGatewaysRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[TargetVpnGatewaysTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
class TargetVpnGatewaysClient(metaclass=TargetVpnGatewaysClientMeta):
    """The TargetVpnGateways API."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Converts api endpoint to mTLS endpoint.

        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com"
        respectively.

        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        # An empty/None endpoint cannot be converted; return it unchanged.
        if not api_endpoint:
            return api_endpoint

        # NOTE(review): the group names ("name", "mtls", "sandbox",
        # "googledomain") were stripped to bare "(?P" in the corrupted
        # source -- "(?P" without "<name>" raises re.error -- so they are
        # restored here to the canonical GAPIC pattern.
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )

        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com host: no-op.
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )

        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    # Default (and derived default mTLS) API endpoints for this service.
    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            TargetVpnGatewaysClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_info(info)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.

        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            TargetVpnGatewaysClient: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(
            filename)
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)

    # Backward-compatible alias used by older generated surfaces.
    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> TargetVpnGatewaysTransport:
        """Returns the transport used by the client instance.

        Returns:
            TargetVpnGatewaysTransport: The transport used by the client
                instance.
        """
        return self._transport

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Returns a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
        """Parse a billing_account path into its component segments."""
        # Named group restored (stripped to "(?P" in the corrupted source).
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Returns a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str, str]:
        """Parse a folder path into its component segments."""
        # Named group restored (stripped to "(?P" in the corrupted source).
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Returns a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str, str]:
        """Parse a organization path into its component segments."""
        # Named group restored (stripped to "(?P" in the corrupted source).
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Returns a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str, str]:
        """Parse a project path into its component segments."""
        # Named group restored (stripped to "(?P" in the corrupted source).
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Returns a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, TargetVpnGatewaysTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the target vpn gateways client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, TargetVpnGatewaysTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. 
Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, TargetVpnGatewaysTransport): + # transport is a TargetVpnGatewaysTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListTargetVpnGatewaysRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of target VPN gateways. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListTargetVpnGatewaysRequest, dict]): + The request object. A request message for + TargetVpnGateways.AggregatedList. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_vpn_gateways.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListTargetVpnGatewaysRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListTargetVpnGatewaysRequest): + request = compute.AggregatedListTargetVpnGatewaysRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def delete(self, + request: Union[compute.DeleteTargetVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_vpn_gateway: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified target VPN gateway. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteTargetVpnGatewayRequest, dict]): + The request object. A request message for + TargetVpnGateways.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_vpn_gateway (str): + Name of the target VPN gateway to + delete. + + This corresponds to the ``target_vpn_gateway`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, target_vpn_gateway]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteTargetVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteTargetVpnGatewayRequest): + request = compute.DeleteTargetVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_vpn_gateway is not None: + request.target_vpn_gateway = target_vpn_gateway + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + + def get(self, + request: Union[compute.GetTargetVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_vpn_gateway: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TargetVpnGateway: + r"""Returns the specified target VPN gateway. Gets a list + of available target VPN gateways by making a list() + request. + + Args: + request (Union[google.cloud.compute_v1.types.GetTargetVpnGatewayRequest, dict]): + The request object. A request message for + TargetVpnGateways.Get. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_vpn_gateway (str): + Name of the target VPN gateway to + return. + + This corresponds to the ``target_vpn_gateway`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TargetVpnGateway: + Represents a Target VPN Gateway + resource. The target VPN gateway + resource represents a Classic Cloud VPN + gateway. For more information, read the + the Cloud VPN Overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, target_vpn_gateway]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetTargetVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetTargetVpnGatewayRequest): + request = compute.GetTargetVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_vpn_gateway is not None: + request.target_vpn_gateway = target_vpn_gateway + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertTargetVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + target_vpn_gateway_resource: compute.TargetVpnGateway = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a target VPN gateway in the specified project + and region using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertTargetVpnGatewayRequest, dict]): + The request object. A request message for + TargetVpnGateways.Insert. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + target_vpn_gateway_resource (google.cloud.compute_v1.types.TargetVpnGateway): + The body resource for this request + This corresponds to the ``target_vpn_gateway_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, target_vpn_gateway_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertTargetVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertTargetVpnGatewayRequest): + request = compute.InsertTargetVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if target_vpn_gateway_resource is not None: + request.target_vpn_gateway_resource = target_vpn_gateway_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListTargetVpnGatewaysRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of target VPN gateways available to + the specified project and region. + + Args: + request (Union[google.cloud.compute_v1.types.ListTargetVpnGatewaysRequest, dict]): + The request object. A request message for + TargetVpnGateways.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.target_vpn_gateways.pagers.ListPager: + Contains a list of TargetVpnGateway + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListTargetVpnGatewaysRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListTargetVpnGatewaysRequest): + request = compute.ListTargetVpnGatewaysRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "TargetVpnGatewaysClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/pagers.py new file mode 100644 index 000000000..f4a765013 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class AggregatedListPager:
    """Pager for ``aggregated_list`` responses.

    Thinly wraps an initial
    :class:`google.cloud.compute_v1.types.TargetVpnGatewayAggregatedList`
    and exposes ``__iter__`` over its ``items`` field. When the current
    page is exhausted and a ``next_page_token`` is present, further
    ``AggregatedList`` requests are issued transparently.

    All attributes of the underlying response type are available on the
    pager; only the most recently fetched response is retained, so
    attribute lookups reflect the latest page.
    """
    def __init__(self,
            method: Callable[..., compute.TargetVpnGatewayAggregatedList],
            request: compute.AggregatedListTargetVpnGatewaysRequest,
            response: compute.TargetVpnGatewayAggregatedList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.AggregatedListTargetVpnGatewaysRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.TargetVpnGatewayAggregatedList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Re-wrap the request so the pager owns a copy whose page_token
        # it can advance without mutating the caller's object.
        self._request = compute.AggregatedListTargetVpnGatewaysRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are proxied to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.TargetVpnGatewayAggregatedList]:
        """Lazily yield each response page, fetching the next on demand."""
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                return
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)

    def __iter__(self) -> Iterator[Tuple[str, compute.TargetVpnGatewaysScopedList]]:
        # The aggregated response maps scope name -> scoped list; iterate
        # those (key, value) pairs across every page.
        for page in self.pages:
            for entry in page.items.items():
                yield entry

    def get(self, key: str) -> Optional[compute.TargetVpnGatewaysScopedList]:
        """Return the scoped list for ``key`` from the current page, if any."""
        return self._response.items.get(key)

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'


class ListPager:
    """Pager for ``list`` responses.

    Thinly wraps an initial
    :class:`google.cloud.compute_v1.types.TargetVpnGatewayList`
    and exposes ``__iter__`` over its ``items`` field. When the current
    page is exhausted and a ``next_page_token`` is present, further
    ``List`` requests are issued transparently.

    All attributes of the underlying response type are available on the
    pager; only the most recently fetched response is retained, so
    attribute lookups reflect the latest page.
    """
    def __init__(self,
            method: Callable[..., compute.TargetVpnGatewayList],
            request: compute.ListTargetVpnGatewaysRequest,
            response: compute.TargetVpnGatewayList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The method that was originally called, and
                which instantiated this pager.
            request (google.cloud.compute_v1.types.ListTargetVpnGatewaysRequest):
                The initial request object.
            response (google.cloud.compute_v1.types.TargetVpnGatewayList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        self._method = method
        # Own a mutable copy of the request for page_token advancement.
        self._request = compute.ListTargetVpnGatewaysRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Unknown attributes are proxied to the latest response.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.TargetVpnGatewayList]:
        """Lazily yield each response page, fetching the next on demand."""
        while True:
            yield self._response
            token = self._response.next_page_token
            if not token:
                return
            self._request.page_token = token
            self._response = self._method(self._request, metadata=self._metadata)

    def __iter__(self) -> Iterator[compute.TargetVpnGateway]:
        # Flatten every page's resources into one iterator.
        for page in self.pages:
            for item in page.items:
                yield item

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}<{self._response!r}>'
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import TargetVpnGatewaysTransport +from .rest import TargetVpnGatewaysRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[TargetVpnGatewaysTransport]] +_transport_registry['rest'] = TargetVpnGatewaysRestTransport + +__all__ = ( + 'TargetVpnGatewaysTransport', + 'TargetVpnGatewaysRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/transports/base.py new file mode 100644 index 000000000..93b010030 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/target_vpn_gateways/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

# Derive the client-info version from the installed distribution; fall back
# to a bare ClientInfo when the code is run without being pip-installed.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class TargetVpnGatewaysTransport(abc.ABC):
    """Abstract transport class for TargetVpnGateways.

    Concrete transports (e.g. the REST transport) subclass this and
    implement the per-RPC properties; this base class owns credential
    resolution and host normalization.
    """

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            # PEP 484: a default of None requires an Optional annotation.
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.

        Raises:
            google.api_core.exceptions.DuplicateCredentialArgs: If both
                ``credentials`` and ``credentials_file`` are supplied.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the scopes.
        self._scopes = scopes

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # If the credentials are service account credentials, then always try
        # to use self signed JWT (the hasattr guard tolerates older
        # google-auth releases that lack with_always_use_jwt_access).
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        # Save the credentials.
        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods. Every RPC here uses the same
        # wrap_method settings, so build the map with a comprehension
        # instead of five duplicated entries.
        self._wrapped_methods = {
            handler: gapic_v1.method.wrap_method(
                handler,
                default_timeout=None,
                client_info=client_info,
            )
            for handler in (
                self.aggregated_list,
                self.delete,
                self.get,
                self.insert,
                self.list,
            )
        }

    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListTargetVpnGatewaysRequest],
            Union[
                compute.TargetVpnGatewayAggregatedList,
                Awaitable[compute.TargetVpnGatewayAggregatedList]
            ]]:
        raise NotImplementedError()

    @property
    def delete(self) -> Callable[
            [compute.DeleteTargetVpnGatewayRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetTargetVpnGatewayRequest],
            Union[
                compute.TargetVpnGateway,
                Awaitable[compute.TargetVpnGateway]
            ]]:
        raise NotImplementedError()

    @property
    def insert(self) -> Callable[
            [compute.InsertTargetVpnGatewayRequest],
            Union[
                compute.Operation,
                Awaitable[compute.Operation]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListTargetVpnGatewaysRequest],
            Union[
                compute.TargetVpnGatewayList,
                Awaitable[compute.TargetVpnGatewayList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'TargetVpnGatewaysTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

# Older api-core releases lack gapic_v1.method._MethodDefault.
try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

from google.cloud.compute_v1.types import compute

from .base import TargetVpnGatewaysTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class TargetVpnGatewaysRestTransport(TargetVpnGatewaysTransport):
    """REST backend transport for TargetVpnGateways.

    The TargetVpnGateways API.

    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.

    It sends JSON representations of protocol buffers over HTTP/1.1
    """

    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: Optional[ga_credentials.Credentials] = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
                certificate to configure mutual TLS HTTP channel. It is ignored
                if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
            url_scheme: the protocol scheme for the API endpoint.  Normally
                "https", but for testing or local servers,
                "http" can be specified.
        """
        # Run the base constructor
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
        # credentials object
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _call_rest(self, request, *,
            request_type,
            response_type,
            http_options,
            required_fields,
            body_type=None,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ):
        """Shared implementation of a unary REST call.

        Transcodes ``request`` onto the HTTP surface described by
        ``http_options``, sends it over the authorized session, raises the
        appropriate ``GoogleAPICallError`` subclass on a >= 400 status, and
        deserializes the body into ``response_type``.

        Args:
            request: The proto-plus request message.
            request_type: The class of ``request`` (used for (de)serialization).
            response_type: The proto-plus class to parse the response into.
            http_options: Transcoding rules (method/uri/body) for this RPC.
            required_fields: ``(snake_case, camelCase)`` pairs that must be
                present in the query string even when they hold their
                proto default value.
            body_type: Proto-plus class of the HTTP body, or ``None`` for
                body-less methods.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata (become headers).
        """
        request_kwargs = request_type.to_dict(request)
        transcoded_request = path_template.transcode(
            http_options, **request_kwargs)

        # Jsonify the request body, when this RPC has one.
        body = None
        if body_type is not None:
            body = body_type.to_json(
                body_type(transcoded_request['body']),
                including_default_value_fields=False,
                use_integers_for_enums=False,
            )

        uri = transcoded_request['uri']
        http_method = transcoded_request['method']

        # Jsonify the query params.
        query_params = json.loads(request_type.to_json(
            request_type(transcoded_request['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # Ensure required fields have values in query_params.
        # If a required field has a default value, it can get lost
        # by the to_json call above.
        orig_query_params = transcoded_request['query_params']
        for snake_case_name, camel_case_name in required_fields:
            if snake_case_name in orig_query_params and camel_case_name not in query_params:
                query_params[camel_case_name] = orig_query_params[snake_case_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        send_kwargs = {
            'timeout': timeout,
            'headers': headers,
            'params': rest_helpers.flatten_query_params(query_params),
        }
        if body is not None:
            send_kwargs['data'] = body
        response = getattr(self._session, http_method)(
            # TODO: replace with proper scheme configuration (http/https) logic
            'https://{host}{uri}'.format(host=self._host, uri=uri),
            **send_kwargs,
        )

        # In case of error, raise the appropriate
        # core_exceptions.GoogleAPICallError exception subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return response_type.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _aggregated_list(self,
            request: compute.AggregatedListTargetVpnGatewaysRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.TargetVpnGatewayAggregatedList:
        """Call the aggregated list method over HTTP.

        Args:
            request (~.compute.AggregatedListTargetVpnGatewaysRequest):
                The request object. A request message for
                TargetVpnGateways.AggregatedList. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried. NOTE(review): accepted for interface
                parity but not applied at this layer.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.TargetVpnGatewayAggregatedList:
        """
        return self._call_rest(
            request,
            request_type=compute.AggregatedListTargetVpnGatewaysRequest,
            response_type=compute.TargetVpnGatewayAggregatedList,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/aggregated/targetVpnGateways',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('project', 'project'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _delete(self,
            request: compute.DeleteTargetVpnGatewayRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        """Call the delete method over HTTP.

        Args:
            request (~.compute.DeleteTargetVpnGatewayRequest):
                The request object. A request message for
                TargetVpnGateways.Delete. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried. NOTE(review): accepted for interface
                parity but not applied at this layer.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_rest(
            request,
            request_type=compute.DeleteTargetVpnGatewayRequest,
            response_type=compute.Operation,
            http_options=[
                {
                    'method': 'delete',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{target_vpn_gateway}',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('project', 'project'),
                ('region', 'region'),
                ('target_vpn_gateway', 'targetVpnGateway'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _get(self,
            request: compute.GetTargetVpnGatewayRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.TargetVpnGateway:
        """Call the get method over HTTP.

        Args:
            request (~.compute.GetTargetVpnGatewayRequest):
                The request object. A request message for
                TargetVpnGateways.Get. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried. NOTE(review): accepted for interface
                parity but not applied at this layer.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.TargetVpnGateway:
                Represents a Target VPN Gateway resource (a Classic
                Cloud VPN gateway).
        """
        return self._call_rest(
            request,
            request_type=compute.GetTargetVpnGatewayRequest,
            response_type=compute.TargetVpnGateway,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{target_vpn_gateway}',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('project', 'project'),
                ('region', 'region'),
                ('target_vpn_gateway', 'targetVpnGateway'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    def _insert(self,
            request: compute.InsertTargetVpnGatewayRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Operation:
        """Call the insert method over HTTP.

        Args:
            request (~.compute.InsertTargetVpnGatewayRequest):
                The request object. A request message for
                TargetVpnGateways.Insert. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried. NOTE(review): accepted for interface
                parity but not applied at this layer.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.Operation:
                Represents an Operation resource (global, regional or
                zonal), used to manage asynchronous API requests.
        """
        return self._call_rest(
            request,
            request_type=compute.InsertTargetVpnGatewayRequest,
            response_type=compute.Operation,
            http_options=[
                {
                    'method': 'post',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/targetVpnGateways',
                    'body': 'target_vpn_gateway_resource',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('project', 'project'),
                ('region', 'region'),
            ],
            # This RPC carries a TargetVpnGateway resource as the HTTP body.
            body_type=compute.TargetVpnGateway,
            timeout=timeout,
            metadata=metadata,
        )

    def _list(self,
            request: compute.ListTargetVpnGatewaysRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Optional[float] = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.TargetVpnGatewayList:
        """Call the list method over HTTP.

        Args:
            request (~.compute.ListTargetVpnGatewaysRequest):
                The request object. A request message for
                TargetVpnGateways.List. See the method
                description for details.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried. NOTE(review): accepted for interface
                parity but not applied at this layer.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            ~.compute.TargetVpnGatewayList:
                Contains a list of TargetVpnGateway resources.
        """
        return self._call_rest(
            request,
            request_type=compute.ListTargetVpnGatewaysRequest,
            response_type=compute.TargetVpnGatewayList,
            http_options=[
                {
                    'method': 'get',
                    'uri': '/compute/v1/projects/{project}/regions/{region}/targetVpnGateways',
                },
            ],
            required_fields=[
                # (snake_case_name, camel_case_name)
                ('project', 'project'),
                ('region', 'region'),
            ],
            timeout=timeout,
            metadata=metadata,
        )

    @property
    def aggregated_list(self) -> Callable[
            [compute.AggregatedListTargetVpnGatewaysRequest],
            compute.TargetVpnGatewayAggregatedList]:
        return self._aggregated_list

    @property
    def delete(self) -> Callable[
            [compute.DeleteTargetVpnGatewayRequest],
            compute.Operation]:
        return self._delete

    @property
    def get(self) -> Callable[
            [compute.GetTargetVpnGatewayRequest],
            compute.TargetVpnGateway]:
        return self._get

    @property
    def insert(self) -> Callable[
            [compute.InsertTargetVpnGatewayRequest],
            compute.Operation]:
        return self._insert

    @property
    def list(self) -> Callable[
            [compute.ListTargetVpnGatewaysRequest],
            compute.TargetVpnGatewayList]:
        return self._list

    def close(self):
        self._session.close()


__all__ = (
    'TargetVpnGatewaysRestTransport',
)
+# +from .client import UrlMapsClient + +__all__ = ( + 'UrlMapsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/client.py new file mode 100644 index 000000000..181a674f4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/client.py @@ -0,0 +1,1159 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.url_maps import pagers +from google.cloud.compute_v1.types import compute +from 
.transports.base import UrlMapsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import UrlMapsRestTransport + + +class UrlMapsClientMeta(type): + """Metaclass for the UrlMaps client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[UrlMapsTransport]] + _transport_registry["rest"] = UrlMapsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[UrlMapsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class UrlMapsClient(metaclass=UrlMapsClientMeta): + """The UrlMaps API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
+ ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + UrlMapsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + UrlMapsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> UrlMapsTransport: + """Returns the transport used by the client instance. + + Returns: + UrlMapsTransport: The transport used by the client + instance. 
+ """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def 
parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, UrlMapsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the url maps client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, UrlMapsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. + if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. 
+ # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, UrlMapsTransport): + # transport is a UrlMapsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListUrlMapsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves the list of all UrlMap resources, regional + and global, available to the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListUrlMapsRequest, dict]): + The request object. A request message for + UrlMaps.AggregatedList. See the method description for + details. + project (str): + Name of the project scoping this + request. + + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.services.url_maps.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListUrlMapsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListUrlMapsRequest): + request = compute.AggregatedListUrlMapsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteUrlMapRequest, dict] = None, + *, + project: str = None, + url_map: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified UrlMap resource. 
+ + Args: + request (Union[google.cloud.compute_v1.types.DeleteUrlMapRequest, dict]): + The request object. A request message for + UrlMaps.Delete. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to + delete. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, url_map]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteUrlMapRequest): + request = compute.DeleteUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if url_map is not None: + request.url_map = url_map + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetUrlMapRequest, dict] = None, + *, + project: str = None, + url_map: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.UrlMap: + r"""Returns the specified UrlMap resource. Gets a list of + available URL maps by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetUrlMapRequest, dict]): + The request object. A request message for UrlMaps.Get. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to + return. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.UrlMap: + Represents a URL Map resource. Google Compute Engine has + two URL Map resources: \* + [Global](/compute/docs/reference/rest/v1/urlMaps) \* + [Regional](/compute/docs/reference/rest/v1/regionUrlMaps) + A URL map resource is a component of certain types of + GCP load balancers and Traffic Director. \* urlMaps are + used by external HTTP(S) load balancers and Traffic + Director. \* regionUrlMaps are used by internal HTTP(S) + load balancers. For a list of supported URL map features + by load balancer type, see the Load balancing features: + Routing and traffic management table. For a list of + supported URL map features for Traffic Director, see the + Traffic Director features: Routing and traffic + management table. This resource defines mappings from + host names and URL paths to either a backend service or + a backend bucket. To use the global urlMaps resource, + the backend service must have a loadBalancingScheme of + either EXTERNAL or INTERNAL_SELF_MANAGED. To use the + regionUrlMaps resource, the backend service must have a + loadBalancingScheme of INTERNAL_MANAGED. For more + information, read URL Map Concepts. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, url_map]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetUrlMapRequest. 
+ # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetUrlMapRequest): + request = compute.GetUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if url_map is not None: + request.url_map = url_map + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertUrlMapRequest, dict] = None, + *, + project: str = None, + url_map_resource: compute.UrlMap = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a UrlMap resource in the specified project + using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertUrlMapRequest, dict]): + The request object. A request message for + UrlMaps.Insert. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + This corresponds to the ``url_map_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, url_map_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertUrlMapRequest): + request = compute.InsertUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if url_map_resource is not None: + request.url_map_resource = url_map_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def invalidate_cache(self, + request: Union[compute.InvalidateCacheUrlMapRequest, dict] = None, + *, + project: str = None, + url_map: str = None, + cache_invalidation_rule_resource: compute.CacheInvalidationRule = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Initiates a cache invalidation operation, invalidating the + specified path, scoped to the specified UrlMap. For more + information, see `Invalidating cached + content `__. + + Args: + request (Union[google.cloud.compute_v1.types.InvalidateCacheUrlMapRequest, dict]): + The request object. A request message for + UrlMaps.InvalidateCache. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap scoping this + request. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cache_invalidation_rule_resource (google.cloud.compute_v1.types.CacheInvalidationRule): + The body resource for this request + This corresponds to the ``cache_invalidation_rule_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, url_map, cache_invalidation_rule_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InvalidateCacheUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InvalidateCacheUrlMapRequest): + request = compute.InvalidateCacheUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if url_map is not None: + request.url_map = url_map + if cache_invalidation_rule_resource is not None: + request.cache_invalidation_rule_resource = cache_invalidation_rule_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.invalidate_cache] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListUrlMapsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves the list of UrlMap resources available to + the specified project. + + Args: + request (Union[google.cloud.compute_v1.types.ListUrlMapsRequest, dict]): + The request object. A request message for UrlMaps.List. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.url_maps.pagers.ListPager: + Contains a list of UrlMap resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListUrlMapsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.ListUrlMapsRequest): + request = compute.ListUrlMapsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def patch(self, + request: Union[compute.PatchUrlMapRequest, dict] = None, + *, + project: str = None, + url_map: str = None, + url_map_resource: compute.UrlMap = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Patches the specified UrlMap resource with the data + included in the request. This method supports PATCH + semantics and uses the JSON merge patch format and + processing rules. + + Args: + request (Union[google.cloud.compute_v1.types.PatchUrlMapRequest, dict]): + The request object. A request message for UrlMaps.Patch. + See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to patch. + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + This corresponds to the ``url_map_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, url_map, url_map_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.PatchUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, compute.PatchUrlMapRequest): + request = compute.PatchUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if url_map is not None: + request.url_map = url_map + if url_map_resource is not None: + request.url_map_resource = url_map_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.patch] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update(self, + request: Union[compute.UpdateUrlMapRequest, dict] = None, + *, + project: str = None, + url_map: str = None, + url_map_resource: compute.UrlMap = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Updates the specified UrlMap resource with the data + included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.UpdateUrlMapRequest, dict]): + The request object. A request message for + UrlMaps.Update. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to + update. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + This corresponds to the ``url_map_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, url_map, url_map_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.UpdateUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.UpdateUrlMapRequest): + request = compute.UpdateUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if url_map is not None: + request.url_map = url_map + if url_map_resource is not None: + request.url_map_resource = url_map_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def validate(self, + request: Union[compute.ValidateUrlMapRequest, dict] = None, + *, + project: str = None, + url_map: str = None, + url_maps_validate_request_resource: compute.UrlMapsValidateRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.UrlMapsValidateResponse: + r"""Runs static validation for the UrlMap. In particular, + the tests of the provided UrlMap will be run. Calling + this method does NOT create the UrlMap. + + Args: + request (Union[google.cloud.compute_v1.types.ValidateUrlMapRequest, dict]): + The request object. A request message for + UrlMaps.Validate. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_map (str): + Name of the UrlMap resource to be + validated as. + + This corresponds to the ``url_map`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + url_maps_validate_request_resource (google.cloud.compute_v1.types.UrlMapsValidateRequest): + The body resource for this request + This corresponds to the ``url_maps_validate_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.UrlMapsValidateResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, url_map, url_maps_validate_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ValidateUrlMapRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ValidateUrlMapRequest): + request = compute.ValidateUrlMapRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if url_map is not None: + request.url_map = url_map + if url_maps_validate_request_resource is not None: + request.url_maps_validate_request_resource = url_maps_validate_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.validate] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! 
Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "UrlMapsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/pagers.py new file mode 100644 index 000000000..6721a5f7a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.UrlMapsAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.UrlMapsAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.UrlMapsAggregatedList], + request: compute.AggregatedListUrlMapsRequest, + response: compute.UrlMapsAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListUrlMapsRequest): + The initial request object. + response (google.cloud.compute_v1.types.UrlMapsAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListUrlMapsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.UrlMapsAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.UrlMapsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.UrlMapsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.UrlMapList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.UrlMapList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.UrlMapList], + request: compute.ListUrlMapsRequest, + response: compute.UrlMapList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListUrlMapsRequest): + The initial request object. + response (google.cloud.compute_v1.types.UrlMapList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListUrlMapsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.UrlMapList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.UrlMap]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/__init__.py new file mode 100644 index 000000000..8cef95c71 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import UrlMapsTransport +from .rest import UrlMapsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[UrlMapsTransport]] +_transport_registry['rest'] = UrlMapsRestTransport + +__all__ = ( + 'UrlMapsTransport', + 'UrlMapsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/base.py new file mode 100644 index 000000000..ccdf5f122 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/base.py @@ -0,0 +1,259 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class UrlMapsTransport(abc.ABC): + """Abstract transport class for UrlMaps.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.invalidate_cache: gapic_v1.method.wrap_method( + self.invalidate_cache, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.patch: gapic_v1.method.wrap_method( + self.patch, + default_timeout=None, + client_info=client_info, + ), + self.update: gapic_v1.method.wrap_method( + self.update, + default_timeout=None, + client_info=client_info, + ), + self.validate: gapic_v1.method.wrap_method( + self.validate, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListUrlMapsRequest], + Union[ + compute.UrlMapsAggregatedList, + Awaitable[compute.UrlMapsAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetUrlMapRequest], + Union[ + compute.UrlMap, + Awaitable[compute.UrlMap] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def invalidate_cache(self) -> Callable[ + [compute.InvalidateCacheUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListUrlMapsRequest], + Union[ + compute.UrlMapList, + Awaitable[compute.UrlMapList] + ]]: + raise NotImplementedError() + + @property + def patch(self) -> Callable[ + [compute.PatchUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def update(self) -> Callable[ + [compute.UpdateUrlMapRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def validate(self) -> Callable[ + [compute.ValidateUrlMapRequest], + Union[ + compute.UrlMapsValidateResponse, + Awaitable[compute.UrlMapsValidateResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'UrlMapsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/rest.py new file mode 100644 index 000000000..2771758df --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/url_maps/transports/rest.py @@ -0,0 +1,1095 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import UrlMapsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class UrlMapsRestTransport(UrlMapsTransport): + """REST backend transport for UrlMaps. + + The UrlMaps API. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListUrlMapsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.UrlMapsAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListUrlMapsRequest): + The request object. A request message for + UrlMaps.AggregatedList. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.UrlMapsAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/urlMaps', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListUrlMapsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListUrlMapsRequest.to_json( + compute.AggregatedListUrlMapsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)
+
+        # Return the response
+        return compute.UrlMapsAggregatedList.from_json(
+            response.content,
+            ignore_unknown_fields=True
+        )
+
+    def _delete(self,
+            request: compute.DeleteUrlMapRequest, *,
+            retry: OptionalRetry=gapic_v1.method.DEFAULT,
+            timeout: float=None,
+            metadata: Sequence[Tuple[str, str]]=(),
+            ) -> compute.Operation:
+        r"""Call the delete method over HTTP.
+
+        Args:
+            request (~.compute.DeleteUrlMapRequest):
+                The request object. A request message for UrlMaps.Delete.
+                See the method description for details.
+
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            ~.compute.Operation:
+                Represents an Operation resource. Google Compute Engine
+                has three Operation resources: \*
+                `Global </compute/docs/reference/rest/v1/globalOperations>`__
+                \*
+                `Regional </compute/docs/reference/rest/v1/regionOperations>`__
+                \*
+                `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
+                You can use an operation resource to manage asynchronous
+                API requests. For more information, read Handling API
+                responses. Operations can be global, regional or zonal.
+                - For global operations, use the ``globalOperations``
+                resource. - For regional operations, use the
+                ``regionOperations`` resource. - For zonal operations,
+                use the ``zonalOperations`` resource. For more
+                information, read Global, Regional, and Zonal Resources.
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/global/urlMaps/{url_map}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "url_map", + "urlMap" + ), + ] + + request_kwargs = compute.DeleteUrlMapRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteUrlMapRequest.to_json( + compute.DeleteUrlMapRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetUrlMapRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.UrlMap: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetUrlMapRequest): + The request object. A request message for UrlMaps.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.UrlMap: + Represents a URL Map resource. Google Compute Engine has + two URL Map resources: \* + `Global `__ \* + `Regional `__ + A URL map resource is a component of certain types of + GCP load balancers and Traffic Director. \* urlMaps are + used by external HTTP(S) load balancers and Traffic + Director. \* regionUrlMaps are used by internal HTTP(S) + load balancers. For a list of supported URL map features + by load balancer type, see the Load balancing features: + Routing and traffic management table. For a list of + supported URL map features for Traffic Director, see the + Traffic Director features: Routing and traffic + management table. This resource defines mappings from + host names and URL paths to either a backend service or + a backend bucket. To use the global urlMaps resource, + the backend service must have a loadBalancingScheme of + either EXTERNAL or INTERNAL_SELF_MANAGED. To use the + regionUrlMaps resource, the backend service must have a + loadBalancingScheme of INTERNAL_MANAGED. For more + information, read URL Map Concepts. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/urlMaps/{url_map}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "url_map", + "urlMap" + ), + ] + + request_kwargs = compute.GetUrlMapRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetUrlMapRequest.to_json( + compute.GetUrlMapRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.UrlMap.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertUrlMapRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertUrlMapRequest): + The request object. A request message for UrlMaps.Insert. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/urlMaps', + 'body': 'url_map_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.InsertUrlMapRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMap.to_json( + compute.UrlMap( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertUrlMapRequest.to_json( + compute.InsertUrlMapRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _invalidate_cache(self, + request: compute.InvalidateCacheUrlMapRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the invalidate cache method over HTTP. + + Args: + request (~.compute.InvalidateCacheUrlMapRequest): + The request object. A request message for + UrlMaps.InvalidateCache. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/urlMaps/{url_map}/invalidateCache', + 'body': 'cache_invalidation_rule_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "url_map", + "urlMap" + ), + ] + + request_kwargs = compute.InvalidateCacheUrlMapRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.CacheInvalidationRule.to_json( + compute.CacheInvalidationRule( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InvalidateCacheUrlMapRequest.to_json( + compute.InvalidateCacheUrlMapRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListUrlMapsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.UrlMapList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListUrlMapsRequest): + The request object. A request message for UrlMaps.List. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.UrlMapList: + Contains a list of UrlMap resources. + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/global/urlMaps', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.ListUrlMapsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListUrlMapsRequest.to_json( + compute.ListUrlMapsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.UrlMapList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _patch(self, + request: compute.PatchUrlMapRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the patch method over HTTP. + + Args: + request (~.compute.PatchUrlMapRequest): + The request object. A request message for UrlMaps.Patch. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'patch', + 'uri': '/compute/v1/projects/{project}/global/urlMaps/{url_map}', + 'body': 'url_map_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "url_map", + "urlMap" + ), + ] + + request_kwargs = compute.PatchUrlMapRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMap.to_json( + compute.UrlMap( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.PatchUrlMapRequest.to_json( + compute.PatchUrlMapRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _update(self, + request: compute.UpdateUrlMapRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the update method over HTTP. + + Args: + request (~.compute.UpdateUrlMapRequest): + The request object. A request message for UrlMaps.Update. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'put', + 'uri': '/compute/v1/projects/{project}/global/urlMaps/{url_map}', + 'body': 'url_map_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "url_map", + "urlMap" + ), + ] + + request_kwargs = compute.UpdateUrlMapRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMap.to_json( + compute.UrlMap( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.UpdateUrlMapRequest.to_json( + compute.UpdateUrlMapRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _validate(self, + request: compute.ValidateUrlMapRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.UrlMapsValidateResponse: + r"""Call the validate method over HTTP. + + Args: + request (~.compute.ValidateUrlMapRequest): + The request object. A request message for + UrlMaps.Validate. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.UrlMapsValidateResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/global/urlMaps/{url_map}/validate', + 'body': 'url_maps_validate_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "url_map", + "urlMap" + ), + ] + + request_kwargs = compute.ValidateUrlMapRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.UrlMapsValidateRequest.to_json( + compute.UrlMapsValidateRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ValidateUrlMapRequest.to_json( + compute.ValidateUrlMapRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.UrlMapsValidateResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListUrlMapsRequest], + compute.UrlMapsAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteUrlMapRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetUrlMapRequest], + compute.UrlMap]: + return self._get + @ property + def insert(self) -> Callable[ + [compute.InsertUrlMapRequest], + compute.Operation]: + return self._insert + @ property + def invalidate_cache(self) -> Callable[ + [compute.InvalidateCacheUrlMapRequest], + compute.Operation]: + return self._invalidate_cache + @ property + def list(self) -> Callable[ + [compute.ListUrlMapsRequest], + compute.UrlMapList]: + return self._list + @ property + def patch(self) -> Callable[ + [compute.PatchUrlMapRequest], + compute.Operation]: + return self._patch + @ property + def update(self) -> Callable[ + [compute.UpdateUrlMapRequest], + compute.Operation]: + return self._update + @ property + def validate(self) -> Callable[ + [compute.ValidateUrlMapRequest], + compute.UrlMapsValidateResponse]: + return self._validate + def close(self): + self._session.close() + + +__all__=( + 'UrlMapsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/__init__.py new file mode 100644 index 000000000..21ace412c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import VpnGatewaysClient + +__all__ = ( + 'VpnGatewaysClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/client.py new file mode 100644 index 000000000..a08a4145b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/client.py @@ -0,0 +1,1077 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.vpn_gateways import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import VpnGatewaysTransport, DEFAULT_CLIENT_INFO +from .transports.rest import VpnGatewaysRestTransport + + +class VpnGatewaysClientMeta(type): + """Metaclass for the VpnGateways client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[VpnGatewaysTransport]] + _transport_registry["rest"] = VpnGatewaysRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[VpnGatewaysTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class VpnGatewaysClient(metaclass=VpnGatewaysClientMeta):
+    """The VpnGateways API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        # NOTE(review): the named groups below were stripped by an earlier
+        # angle-bracket-eating transformation; restored from the canonical
+        # GAPIC generator output so the four-way unpack below works.
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            VpnGatewaysClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VpnGatewaysClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> VpnGatewaysTransport: + """Returns the transport used by the client instance. + + Returns: + VpnGatewaysTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization 
string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, VpnGatewaysTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vpn gateways client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, VpnGatewaysTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, VpnGatewaysTransport): + # transport is a VpnGatewaysTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListVpnGatewaysRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of VPN gateways. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListVpnGatewaysRequest, dict]): + The request object. A request message for + VpnGateways.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.vpn_gateways.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListVpnGatewaysRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListVpnGatewaysRequest): + request = compute.AggregatedListVpnGatewaysRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + vpn_gateway: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified VPN gateway. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteVpnGatewayRequest, dict]): + The request object. A request message for + VpnGateways.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + vpn_gateway (str): + Name of the VPN gateway to delete. + This corresponds to the ``vpn_gateway`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, vpn_gateway]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteVpnGatewayRequest): + request = compute.DeleteVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if vpn_gateway is not None: + request.vpn_gateway = vpn_gateway + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + vpn_gateway: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.VpnGateway: + r"""Returns the specified VPN gateway. Gets a list of + available VPN gateways by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetVpnGatewayRequest, dict]): + The request object. A request message for + VpnGateways.Get. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + vpn_gateway (str): + Name of the VPN gateway to return. + This corresponds to the ``vpn_gateway`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.VpnGateway: + Represents a HA VPN gateway. HA VPN + is a high-availability (HA) Cloud VPN + solution that lets you securely connect + your on-premises network to your Google + Cloud Virtual Private Cloud network + through an IPsec VPN connection in a + single region. For more information + about Cloud HA VPN solutions, see Cloud + VPN topologies . + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, vpn_gateway]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetVpnGatewayRequest): + request = compute.GetVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if vpn_gateway is not None: + request.vpn_gateway = vpn_gateway + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_status(self, + request: Union[compute.GetStatusVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + vpn_gateway: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.VpnGatewaysGetStatusResponse: + r"""Returns the status for the specified VPN gateway. + + Args: + request (Union[google.cloud.compute_v1.types.GetStatusVpnGatewayRequest, dict]): + The request object. A request message for + VpnGateways.GetStatus. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + vpn_gateway (str): + Name of the VPN gateway to return. + This corresponds to the ``vpn_gateway`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.VpnGatewaysGetStatusResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, vpn_gateway]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetStatusVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetStatusVpnGatewayRequest): + request = compute.GetStatusVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if vpn_gateway is not None: + request.vpn_gateway = vpn_gateway + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_status] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + vpn_gateway_resource: compute.VpnGateway = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a VPN gateway in the specified project and + region using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertVpnGatewayRequest, dict]): + The request object. 
A request message for + VpnGateways.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + vpn_gateway_resource (google.cloud.compute_v1.types.VpnGateway): + The body resource for this request + This corresponds to the ``vpn_gateway_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, vpn_gateway_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertVpnGatewayRequest): + request = compute.InsertVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if vpn_gateway_resource is not None: + request.vpn_gateway_resource = vpn_gateway_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListVpnGatewaysRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of VPN gateways available to the + specified project and region. + + Args: + request (Union[google.cloud.compute_v1.types.ListVpnGatewaysRequest, dict]): + The request object. A request message for + VpnGateways.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.vpn_gateways.pagers.ListPager: + Contains a list of VpnGateway + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListVpnGatewaysRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListVpnGatewaysRequest): + request = compute.ListVpnGatewaysRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_labels(self, + request: Union[compute.SetLabelsVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + region_set_labels_request_resource: compute.RegionSetLabelsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Sets the labels on a VpnGateway. To learn more about + labels, read the Labeling Resources documentation. + + Args: + request (Union[google.cloud.compute_v1.types.SetLabelsVpnGatewayRequest, dict]): + The request object. A request message for + VpnGateways.SetLabels. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region_set_labels_request_resource (google.cloud.compute_v1.types.RegionSetLabelsRequest): + The body resource for this request + This corresponds to the ``region_set_labels_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, region_set_labels_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.SetLabelsVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.SetLabelsVpnGatewayRequest): + request = compute.SetLabelsVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if region_set_labels_request_resource is not None: + request.region_set_labels_request_resource = region_set_labels_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_labels] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Union[compute.TestIamPermissionsVpnGatewayRequest, dict] = None, + *, + project: str = None, + region: str = None, + resource: str = None, + test_permissions_request_resource: compute.TestPermissionsRequest = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.TestPermissionsResponse: + r"""Returns permissions that a caller has on the + specified resource. + + Args: + request (Union[google.cloud.compute_v1.types.TestIamPermissionsVpnGatewayRequest, dict]): + The request object. A request message for + VpnGateways.TestIamPermissions. See the method + description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + The name of the region for this + request. + + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + resource (str): + Name or id of the resource for this + request. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + This corresponds to the ``test_permissions_request_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.TestPermissionsResponse: + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, resource, test_permissions_request_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.TestIamPermissionsVpnGatewayRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.TestIamPermissionsVpnGatewayRequest): + request = compute.TestIamPermissionsVpnGatewayRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if resource is not None: + request.resource = resource + if test_permissions_request_resource is not None: + request.test_permissions_request_resource = test_permissions_request_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "VpnGatewaysClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/pagers.py new file mode 100644 index 000000000..de465e1dd --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.VpnGatewayAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.VpnGatewayAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.VpnGatewayAggregatedList], + request: compute.AggregatedListVpnGatewaysRequest, + response: compute.VpnGatewayAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListVpnGatewaysRequest): + The initial request object. + response (google.cloud.compute_v1.types.VpnGatewayAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.AggregatedListVpnGatewaysRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.VpnGatewayAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.VpnGatewaysScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.VpnGatewaysScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.VpnGatewayList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.VpnGatewayList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.VpnGatewayList], + request: compute.ListVpnGatewaysRequest, + response: compute.VpnGatewayList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListVpnGatewaysRequest): + The initial request object. 
+ response (google.cloud.compute_v1.types.VpnGatewayList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.ListVpnGatewaysRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.VpnGatewayList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.VpnGateway]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/__init__.py new file mode 100644 index 000000000..675e9658c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import VpnGatewaysTransport +from .rest import VpnGatewaysRestTransport + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[VpnGatewaysTransport]] +_transport_registry['rest'] = VpnGatewaysRestTransport + +__all__ = ( + 'VpnGatewaysTransport', + 'VpnGatewaysRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/base.py new file mode 100644 index 000000000..a6c43b571 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/base.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class VpnGatewaysTransport(abc.ABC): + """Abstract transport class for VpnGateways.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.get_status: gapic_v1.method.wrap_method( + self.get_status, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.set_labels: gapic_v1.method.wrap_method( + self.set_labels, + default_timeout=None, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListVpnGatewaysRequest], + Union[ + compute.VpnGatewayAggregatedList, + Awaitable[compute.VpnGatewayAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteVpnGatewayRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetVpnGatewayRequest], + Union[ + compute.VpnGateway, + Awaitable[compute.VpnGateway] + ]]: + raise NotImplementedError() + + @property + def get_status(self) -> Callable[ + [compute.GetStatusVpnGatewayRequest], + Union[ + compute.VpnGatewaysGetStatusResponse, + Awaitable[compute.VpnGatewaysGetStatusResponse] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertVpnGatewayRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListVpnGatewaysRequest], + Union[ + compute.VpnGatewayList, + Awaitable[compute.VpnGatewayList] + ]]: + raise NotImplementedError() + + @property + def set_labels(self) -> Callable[ + [compute.SetLabelsVpnGatewayRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsVpnGatewayRequest], + Union[ + compute.TestPermissionsResponse, + Awaitable[compute.TestPermissionsResponse] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'VpnGatewaysTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/rest.py new file mode 100644 index 000000000..087f126e7 --- /dev/null +++ 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_gateways/transports/rest.py @@ -0,0 +1,977 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import VpnGatewaysTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class VpnGatewaysRestTransport(VpnGatewaysTransport): + """REST backend transport for VpnGateways. + + The VpnGateways API. 
+ + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. 
+ url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListVpnGatewaysRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VpnGatewayAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListVpnGatewaysRequest): + The request object. A request message for + VpnGateways.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.VpnGatewayAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/vpnGateways', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListVpnGatewaysRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListVpnGatewaysRequest.to_json( + compute.AggregatedListVpnGatewaysRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.VpnGatewayAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteVpnGatewayRequest): + The request object. A request message for + VpnGateways.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "vpn_gateway", + "vpnGateway" + ), + ] + + request_kwargs = compute.DeleteVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteVpnGatewayRequest.to_json( + compute.DeleteVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VpnGateway: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetVpnGatewayRequest): + The request object. A request message for + VpnGateways.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.VpnGateway: + Represents a HA VPN gateway. HA VPN + is a high-availability (HA) Cloud VPN + solution that lets you securely connect + your on-premises network to your Google + Cloud Virtual Private Cloud network + through an IPsec VPN connection in a + single region. For more information + about Cloud HA VPN solutions, see Cloud + VPN topologies . 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "vpn_gateway", + "vpnGateway" + ), + ] + + request_kwargs = compute.GetVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetVpnGatewayRequest.to_json( + compute.GetVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.VpnGateway.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get_status(self, + request: compute.GetStatusVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VpnGatewaysGetStatusResponse: + r"""Call the get status method over HTTP. + + Args: + request (~.compute.GetStatusVpnGatewayRequest): + The request object. A request message for + VpnGateways.GetStatus. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.VpnGatewaysGetStatusResponse: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}/getStatus', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "vpn_gateway", + "vpnGateway" + ), + ] + + request_kwargs = compute.GetStatusVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetStatusVpnGatewayRequest.to_json( + compute.GetStatusVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.VpnGatewaysGetStatusResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertVpnGatewayRequest): + The request object. A request message for + VpnGateways.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnGateways', + 'body': 'vpn_gateway_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.VpnGateway.to_json( + compute.VpnGateway( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertVpnGatewayRequest.to_json( + compute.InsertVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListVpnGatewaysRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VpnGatewayList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListVpnGatewaysRequest): + The request object. A request message for + VpnGateways.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.VpnGatewayList: + Contains a list of VpnGateway + resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnGateways', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListVpnGatewaysRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListVpnGatewaysRequest.to_json( + compute.ListVpnGatewaysRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.VpnGatewayList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _set_labels(self, + request: compute.SetLabelsVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the set labels method over HTTP. + + Args: + request (~.compute.SetLabelsVpnGatewayRequest): + The request object. A request message for + VpnGateways.SetLabels. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnGateways/{resource}/setLabels', + 'body': 'region_set_labels_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.SetLabelsVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.RegionSetLabelsRequest.to_json( + compute.RegionSetLabelsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.SetLabelsVpnGatewayRequest.to_json( + compute.SetLabelsVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _test_iam_permissions(self, + request: compute.TestIamPermissionsVpnGatewayRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.TestPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.compute.TestIamPermissionsVpnGatewayRequest): + The request object. A request message for + VpnGateways.TestIamPermissions. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.TestPermissionsResponse: + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions', + 'body': 'test_permissions_request_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "resource", + "resource" + ), + ] + + request_kwargs = compute.TestIamPermissionsVpnGatewayRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.TestPermissionsRequest.to_json( + compute.TestPermissionsRequest( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.TestIamPermissionsVpnGatewayRequest.to_json( + 
compute.TestIamPermissionsVpnGatewayRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.TestPermissionsResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListVpnGatewaysRequest], + compute.VpnGatewayAggregatedList]: + return self._aggregated_list + @ property + def delete(self) -> Callable[ + [compute.DeleteVpnGatewayRequest], + compute.Operation]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetVpnGatewayRequest], + compute.VpnGateway]: + return self._get + @ property + def get_status(self) -> Callable[ + [compute.GetStatusVpnGatewayRequest], + compute.VpnGatewaysGetStatusResponse]: + return self._get_status + @ property + def insert(self) -> Callable[ + [compute.InsertVpnGatewayRequest], + compute.Operation]: + return self._insert + @ property + def list(self) -> Callable[ + [compute.ListVpnGatewaysRequest], + compute.VpnGatewayList]: + return self._list + @ property + def set_labels(self) -> Callable[ + [compute.SetLabelsVpnGatewayRequest], + compute.Operation]: + return self._set_labels + @ property + def test_iam_permissions(self) -> Callable[ + [compute.TestIamPermissionsVpnGatewayRequest], + compute.TestPermissionsResponse]: + return self._test_iam_permissions + def close(self): + self._session.close() + + +__all__=( + 'VpnGatewaysRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/__init__.py new file mode 100644 index 000000000..3d856c080 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import VpnTunnelsClient + +__all__ = ( + 'VpnTunnelsClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/client.py new file mode 100644 index 000000000..50b16829c --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/client.py @@ -0,0 +1,795 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.vpn_tunnels import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import VpnTunnelsTransport, DEFAULT_CLIENT_INFO +from .transports.rest import VpnTunnelsRestTransport + + +class VpnTunnelsClientMeta(type): + """Metaclass for the VpnTunnels client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[VpnTunnelsTransport]] + _transport_registry["rest"] = VpnTunnelsRestTransport + + def get_transport_class(cls, + label: str = None, + ) -> Type[VpnTunnelsTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. 
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class VpnTunnelsClient(metaclass=VpnTunnelsClientMeta):
+    """The VpnTunnels API."""
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "compute.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            VpnTunnelsClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + VpnTunnelsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> VpnTunnelsTransport: + """Returns the transport used by the client instance. + + Returns: + VpnTunnelsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" 
+ return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, VpnTunnelsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the vpn tunnels client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, VpnTunnelsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, VpnTunnelsTransport): + # transport is a VpnTunnelsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def aggregated_list(self, + request: Union[compute.AggregatedListVpnTunnelsRequest, dict] = None, + *, + project: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.AggregatedListPager: + r"""Retrieves an aggregated list of VPN tunnels. + + Args: + request (Union[google.cloud.compute_v1.types.AggregatedListVpnTunnelsRequest, dict]): + The request object. A request message for + VpnTunnels.AggregatedList. See the method description + for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.vpn_tunnels.pagers.AggregatedListPager: + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.AggregatedListVpnTunnelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.AggregatedListVpnTunnelsRequest): + request = compute.AggregatedListVpnTunnelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.aggregated_list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.AggregatedListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete(self, + request: Union[compute.DeleteVpnTunnelRequest, dict] = None, + *, + project: str = None, + region: str = None, + vpn_tunnel: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Deletes the specified VpnTunnel resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteVpnTunnelRequest, dict]): + The request object. A request message for + VpnTunnels.Delete. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + vpn_tunnel (str): + Name of the VpnTunnel resource to + delete. + + This corresponds to the ``vpn_tunnel`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region, vpn_tunnel]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteVpnTunnelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteVpnTunnelRequest): + request = compute.DeleteVpnTunnelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if vpn_tunnel is not None: + request.vpn_tunnel = vpn_tunnel + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetVpnTunnelRequest, dict] = None, + *, + project: str = None, + region: str = None, + vpn_tunnel: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.VpnTunnel: + r"""Returns the specified VpnTunnel resource. Gets a list + of available VPN tunnels by making a list() request. + + Args: + request (Union[google.cloud.compute_v1.types.GetVpnTunnelRequest, dict]): + The request object. A request message for + VpnTunnels.Get. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. 
+ This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + vpn_tunnel (str): + Name of the VpnTunnel resource to + return. + + This corresponds to the ``vpn_tunnel`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.VpnTunnel: + Represents a Cloud VPN Tunnel + resource. For more information about + VPN, read the the Cloud VPN Overview. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, vpn_tunnel]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetVpnTunnelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetVpnTunnelRequest): + request = compute.GetVpnTunnelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if vpn_tunnel is not None: + request.vpn_tunnel = vpn_tunnel + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def insert(self, + request: Union[compute.InsertVpnTunnelRequest, dict] = None, + *, + project: str = None, + region: str = None, + vpn_tunnel_resource: compute.VpnTunnel = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Creates a VpnTunnel resource in the specified project + and region using the data included in the request. + + Args: + request (Union[google.cloud.compute_v1.types.InsertVpnTunnelRequest, dict]): + The request object. A request message for + VpnTunnels.Insert. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + vpn_tunnel_resource (google.cloud.compute_v1.types.VpnTunnel): + The body resource for this request + This corresponds to the ``vpn_tunnel_resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. 
Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, region, vpn_tunnel_resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.InsertVpnTunnelRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.InsertVpnTunnelRequest): + request = compute.InsertVpnTunnelRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + if vpn_tunnel_resource is not None: + request.vpn_tunnel_resource = vpn_tunnel_resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.insert] + + # Send the request. 
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListVpnTunnelsRequest, dict] = None, + *, + project: str = None, + region: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of VpnTunnel resources contained in + the specified project and region. + + Args: + request (Union[google.cloud.compute_v1.types.ListVpnTunnelsRequest, dict]): + The request object. A request message for + VpnTunnels.List. See the method description for details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + region (str): + Name of the region for this request. + This corresponds to the ``region`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.vpn_tunnels.pagers.ListPager: + Contains a list of VpnTunnel + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, region]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListVpnTunnelsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListVpnTunnelsRequest): + request = compute.ListVpnTunnelsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if region is not None: + request.region = region + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "VpnTunnelsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/pagers.py new file mode 100644 index 000000000..3c71ff356 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/pagers.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class AggregatedListPager: + """A pager for iterating through ``aggregated_list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.VpnTunnelAggregatedList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``AggregatedList`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.compute_v1.types.VpnTunnelAggregatedList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.VpnTunnelAggregatedList], + request: compute.AggregatedListVpnTunnelsRequest, + response: compute.VpnTunnelAggregatedList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.AggregatedListVpnTunnelsRequest): + The initial request object. + response (google.cloud.compute_v1.types.VpnTunnelAggregatedList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = compute.AggregatedListVpnTunnelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.VpnTunnelAggregatedList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[Tuple[str, compute.VpnTunnelsScopedList]]: + for page in self.pages: + yield from page.items.items() + + def get(self, key: str) -> Optional[compute.VpnTunnelsScopedList]: + return self._response.items.get(key) + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListPager: + """A pager for iterating through ``list`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.VpnTunnelList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.VpnTunnelList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.VpnTunnelList], + request: compute.ListVpnTunnelsRequest, + response: compute.VpnTunnelList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListVpnTunnelsRequest): + The initial request object. + response (google.cloud.compute_v1.types.VpnTunnelList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListVpnTunnelsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.VpnTunnelList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.VpnTunnel]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/__init__.py new file mode 100644 index 000000000..1607c0362 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import VpnTunnelsTransport +from .rest import VpnTunnelsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[VpnTunnelsTransport]] +_transport_registry['rest'] = VpnTunnelsRestTransport + +__all__ = ( + 'VpnTunnelsTransport', + 'VpnTunnelsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/base.py new file mode 100644 index 000000000..ba04348e7 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/base.py @@ -0,0 +1,203 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class VpnTunnelsTransport(abc.ABC): + """Abstract transport class for VpnTunnels.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.aggregated_list: gapic_v1.method.wrap_method( + self.aggregated_list, + default_timeout=None, + client_info=client_info, + ), + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.insert: gapic_v1.method.wrap_method( + self.insert, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def aggregated_list(self) -> Callable[ + [compute.AggregatedListVpnTunnelsRequest], + Union[ + compute.VpnTunnelAggregatedList, + Awaitable[compute.VpnTunnelAggregatedList] + ]]: + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteVpnTunnelRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetVpnTunnelRequest], + Union[ + compute.VpnTunnel, + Awaitable[compute.VpnTunnel] + ]]: + raise NotImplementedError() + + @property + def insert(self) -> Callable[ + [compute.InsertVpnTunnelRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListVpnTunnelsRequest], + Union[ + compute.VpnTunnelList, + Awaitable[compute.VpnTunnelList] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'VpnTunnelsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/rest.py 
b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/rest.py new file mode 100644 index 000000000..fcdd71a64 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/vpn_tunnels/transports/rest.py @@ -0,0 +1,643 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + + + +from google.cloud.compute_v1.types import compute + +from .base import VpnTunnelsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class VpnTunnelsRestTransport(VpnTunnelsTransport): + """REST backend transport for VpnTunnels. + + The VpnTunnels API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. 
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. + """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _aggregated_list(self, + request: compute.AggregatedListVpnTunnelsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VpnTunnelAggregatedList: + r"""Call the aggregated list method over HTTP. + + Args: + request (~.compute.AggregatedListVpnTunnelsRequest): + The request object. A request message for + VpnTunnels.AggregatedList. See the + method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.compute.VpnTunnelAggregatedList: + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/aggregated/vpnTunnels', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ] + + request_kwargs = compute.AggregatedListVpnTunnelsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.AggregatedListVpnTunnelsRequest.to_json( + compute.AggregatedListVpnTunnelsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.VpnTunnelAggregatedList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _delete(self, + request: compute.DeleteVpnTunnelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteVpnTunnelRequest): + The request object. A request message for + VpnTunnels.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnTunnels/{vpn_tunnel}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "vpn_tunnel", + "vpnTunnel" + ), + ] + + request_kwargs = compute.DeleteVpnTunnelRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteVpnTunnelRequest.to_json( + compute.DeleteVpnTunnelRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetVpnTunnelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VpnTunnel: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetVpnTunnelRequest): + The request object. A request message for VpnTunnels.Get. + See the method description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.VpnTunnel: + Represents a Cloud VPN Tunnel + resource. For more information about + VPN, read the Cloud VPN Overview. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnTunnels/{vpn_tunnel}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ( + "vpn_tunnel", + "vpnTunnel" + ), + ] + + request_kwargs = compute.GetVpnTunnelRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetVpnTunnelRequest.to_json( + compute.GetVpnTunnelRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.VpnTunnel.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _insert(self, + request: compute.InsertVpnTunnelRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the insert method over HTTP. + + Args: + request (~.compute.InsertVpnTunnelRequest): + The request object. A request message for + VpnTunnels.Insert. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnTunnels', + 'body': 'vpn_tunnel_resource', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.InsertVpnTunnelRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + # Jsonify the request body + body = compute.VpnTunnel.to_json( + compute.VpnTunnel( + transcoded_request['body']), + including_default_value_fields=False, + use_integers_for_enums=False + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.InsertVpnTunnelRequest.to_json( + compute.InsertVpnTunnelRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListVpnTunnelsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.VpnTunnelList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListVpnTunnelsRequest): + The request object. A request message for + VpnTunnels.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.VpnTunnelList: + Contains a list of VpnTunnel + resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/regions/{region}/vpnTunnels', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "region", + "region" + ), + ] + + request_kwargs = compute.ListVpnTunnelsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListVpnTunnelsRequest.to_json( + compute.ListVpnTunnelsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+        if response.status_code >= 400:
+            raise core_exceptions.from_http_response(response)  # map HTTP >=400 to the matching GoogleAPICallError subclass
+
+        # Return the response
+        return compute.VpnTunnelList.from_json(
+            response.content,
+            ignore_unknown_fields=True  # tolerate fields added by newer API versions
+        )
+
+    @ property
+    def aggregated_list(self) -> Callable[
+            [compute.AggregatedListVpnTunnelsRequest],
+            compute.VpnTunnelAggregatedList]:
+        return self._aggregated_list  # per-RPC callable handle consumed by the client layer
+    @ property
+    def delete(self) -> Callable[
+            [compute.DeleteVpnTunnelRequest],
+            compute.Operation]:
+        return self._delete
+    @ property
+    def get(self) -> Callable[
+            [compute.GetVpnTunnelRequest],
+            compute.VpnTunnel]:
+        return self._get
+    @ property
+    def insert(self) -> Callable[
+            [compute.InsertVpnTunnelRequest],
+            compute.Operation]:
+        return self._insert
+    @ property
+    def list(self) -> Callable[
+            [compute.ListVpnTunnelsRequest],
+            compute.VpnTunnelList]:
+        return self._list
+    def close(self):
+        self._session.close()  # release the underlying HTTP session
+
+
+__all__=(
+    'VpnTunnelsRestTransport',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/__init__.py
new file mode 100644
index 000000000..bfabbb2c8
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from .client import ZoneOperationsClient  # re-export: the sole public symbol of this sub-package
+
+__all__ = (
+    'ZoneOperationsClient',
+)
diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/client.py
new file mode 100644
index 000000000..c59168112
--- /dev/null
+++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/client.py
@@ -0,0 +1,734 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+import os
+import re
+from typing import Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]  # api-core >= 1.31 exposes _MethodDefault
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.cloud.compute_v1.services.zone_operations import pagers
+from google.cloud.compute_v1.types import compute
+from .transports.base import ZoneOperationsTransport, DEFAULT_CLIENT_INFO
+from .transports.rest import ZoneOperationsRestTransport
+
+
+class ZoneOperationsClientMeta(type):
+    """Metaclass for the ZoneOperations client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[ZoneOperationsTransport]]
+    _transport_registry["rest"] = ZoneOperationsRestTransport  # REST is the only transport registered for this service
+
+    def get_transport_class(cls,
+            label: Optional[str] = None,
+        ) -> Type[ZoneOperationsTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+ if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class ZoneOperationsClient(metaclass=ZoneOperationsClientMeta): + """The ZoneOperations API.""" + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. + + Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to + "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. + Args: + api_endpoint (Optional[str]): the api endpoint to convert. + Returns: + str: converted mTLS api endpoint. + """ + if not api_endpoint: + return api_endpoint + + mtls_endpoint_re = re.compile( + r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" + ) + + m = mtls_endpoint_re.match(api_endpoint) + name, mtls, sandbox, googledomain = m.groups() + if mtls or not googledomain: + return api_endpoint + + if sandbox: + return api_endpoint.replace( + "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" + ) + + return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + + DEFAULT_ENDPOINT = "compute.googleapis.com" + DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore + DEFAULT_ENDPOINT + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ZoneOperationsClient: The constructed client. 
+ """ + credentials = service_account.Credentials.from_service_account_info(info) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + ZoneOperationsClient: The constructed client. + """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> ZoneOperationsTransport: + """Returns the transport used by the client instance. + + Returns: + ZoneOperationsTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified 
organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, ZoneOperationsTransport, None] = None, + client_options: Optional[client_options_lib.ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the zone operations client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ZoneOperationsTransport]): The + transport to use. If set to None, a transport is chosen + automatically. 
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + + # Create SSL credentials for mutual TLS if needed. 
+ if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true" + + client_cert_source_func = None + is_mtls = False + if use_client_cert: + if client_options.client_cert_source: + is_mtls = True + client_cert_source_func = client_options.client_cert_source + else: + is_mtls = mtls.has_default_client_cert_source() + if is_mtls: + client_cert_source_func = mtls.default_client_cert_source() + else: + client_cert_source_func = None + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + else: + use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_mtls_env == "never": + api_endpoint = self.DEFAULT_ENDPOINT + elif use_mtls_env == "always": + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + elif use_mtls_env == "auto": + if is_mtls: + api_endpoint = self.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = self.DEFAULT_ENDPOINT + else: + raise MutualTLSChannelError( + "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted " + "values: never, auto, always" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, ZoneOperationsTransport): + # transport is a ZoneOperationsTransport instance. + if credentials or client_options.credentials_file: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." 
+ ) + self._transport = transport + else: + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + ) + + def delete(self, + request: Union[compute.DeleteZoneOperationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.DeleteZoneOperationResponse: + r"""Deletes the specified zone-specific Operations + resource. + + Args: + request (Union[google.cloud.compute_v1.types.DeleteZoneOperationRequest, dict]): + The request object. A request message for + ZoneOperations.Delete. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + delete. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.DeleteZoneOperationResponse: + A response message for + ZoneOperations.Delete. See the method + description for details. 
+ + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.DeleteZoneOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.DeleteZoneOperationRequest): + request = compute.DeleteZoneOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get(self, + request: Union[compute.GetZoneOperationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Retrieves the specified zone-specific Operations + resource. + + Args: + request (Union[google.cloud.compute_v1.types.GetZoneOperationRequest, dict]): + The request object. A request message for + ZoneOperations.Get. See the method description for + details. + project (str): + Project ID for this request. 
+ This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + return. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([project, zone, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.GetZoneOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.GetZoneOperationRequest): + request = compute.GetZoneOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list(self, + request: Union[compute.ListZoneOperationsRequest, dict] = None, + *, + project: str = None, + zone: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListPager: + r"""Retrieves a list of Operation resources contained + within the specified zone. + + Args: + request (Union[google.cloud.compute_v1.types.ListZoneOperationsRequest, dict]): + The request object. A request message for + ZoneOperations.List. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for request. 
+ This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.services.zone_operations.pagers.ListPager: + Contains a list of Operation + resources. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.ListZoneOperationsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.ListZoneOperationsRequest): + request = compute.ListZoneOperationsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if project is not None: + request.project = project + if zone is not None: + request.zone = zone + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. 
+ response = pagers.ListPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def wait(self, + request: Union[compute.WaitZoneOperationRequest, dict] = None, + *, + project: str = None, + zone: str = None, + operation: str = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: float = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> compute.Operation: + r"""Waits for the specified Operation resource to return as ``DONE`` + or for the request to approach the 2 minute deadline, and + retrieves the specified Operation resource. This method waits + for no more than the 2 minutes and then returns the current + state of the operation, which might be ``DONE`` or still in + progress. This method is called on a best-effort basis. + Specifically: - In uncommon cases, when the server is + overloaded, the request might return before the default deadline + is reached, or might return after zero seconds. - If the default + deadline is reached, there is no guarantee that the operation is + actually done when the method returns. Be prepared to retry if + the operation is not ``DONE``. + + Args: + request (Union[google.cloud.compute_v1.types.WaitZoneOperationRequest, dict]): + The request object. A request message for + ZoneOperations.Wait. See the method description for + details. + project (str): + Project ID for this request. + This corresponds to the ``project`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + zone (str): + Name of the zone for this request. + This corresponds to the ``zone`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + operation (str): + Name of the Operations resource to + return. + + This corresponds to the ``operation`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.compute_v1.types.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + [Global](/compute/docs/reference/rest/v1/globalOperations) + \* + [Regional](/compute/docs/reference/rest/v1/regionOperations) + \* + [Zonal](/compute/docs/reference/rest/v1/zoneOperations) + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the globalOperations + resource. - For regional operations, use the + regionOperations resource. - For zonal operations, use + the zonalOperations resource. For more information, read + Global, Regional, and Zonal Resources. + + """ + # Create or coerce a protobuf request object. + # Sanity check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([project, zone, operation]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a compute.WaitZoneOperationRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, compute.WaitZoneOperationRequest): + request = compute.WaitZoneOperationRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if project is not None: + request.project = project + if zone is not None: + request.zone = zone + if operation is not None: + request.operation = operation + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.wait] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + "google-cloud-compute", + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +__all__ = ( + "ZoneOperationsClient", +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/pagers.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/pagers.py new file mode 100644 index 000000000..43781992b --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/pagers.py @@ -0,0 +1,77 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.compute_v1.types import compute + + +class ListPager: + """A pager for iterating through ``list`` requests. + + This class thinly wraps an initial + :class:`google.cloud.compute_v1.types.OperationList` object, and + provides an ``__iter__`` method to iterate through its + ``items`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``List`` requests and continue to iterate + through the ``items`` field on the + corresponding responses. + + All the usual :class:`google.cloud.compute_v1.types.OperationList` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., compute.OperationList], + request: compute.ListZoneOperationsRequest, + response: compute.OperationList, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.compute_v1.types.ListZoneOperationsRequest): + The initial request object. + response (google.cloud.compute_v1.types.OperationList): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = compute.ListZoneOperationsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[compute.OperationList]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[compute.Operation]: + for page in self.pages: + yield from page.items + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/__init__.py new file mode 100644 index 000000000..de84d78fe --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/__init__.py @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import ZoneOperationsTransport +from .rest import ZoneOperationsRestTransport + + +# Compile a registry of transports. 
+_transport_registry = OrderedDict() # type: Dict[str, Type[ZoneOperationsTransport]] +_transport_registry['rest'] = ZoneOperationsRestTransport + +__all__ = ( + 'ZoneOperationsTransport', + 'ZoneOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/base.py new file mode 100644 index 000000000..f69ac7588 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/base.py @@ -0,0 +1,189 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union +import pkg_resources + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.compute_v1.types import compute + +try: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=pkg_resources.get_distribution( + 'google-cloud-compute', + ).version, + ) +except pkg_resources.DistributionNotFound: + DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() + + +class ZoneOperationsTransport(abc.ABC): + """Abstract transport class for ZoneOperations.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', + ) + + DEFAULT_HOST: str = 'compute.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: ga_credentials.Credentials = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. 
+ scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
+ self._wrapped_methods = { + self.delete: gapic_v1.method.wrap_method( + self.delete, + default_timeout=None, + client_info=client_info, + ), + self.get: gapic_v1.method.wrap_method( + self.get, + default_timeout=None, + client_info=client_info, + ), + self.list: gapic_v1.method.wrap_method( + self.list, + default_timeout=None, + client_info=client_info, + ), + self.wait: gapic_v1.method.wrap_method( + self.wait, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! + """ + raise NotImplementedError() + + @property + def delete(self) -> Callable[ + [compute.DeleteZoneOperationRequest], + Union[ + compute.DeleteZoneOperationResponse, + Awaitable[compute.DeleteZoneOperationResponse] + ]]: + raise NotImplementedError() + + @property + def get(self) -> Callable[ + [compute.GetZoneOperationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + @property + def list(self) -> Callable[ + [compute.ListZoneOperationsRequest], + Union[ + compute.OperationList, + Awaitable[compute.OperationList] + ]]: + raise NotImplementedError() + + @property + def wait(self) -> Callable[ + [compute.WaitZoneOperationRequest], + Union[ + compute.Operation, + Awaitable[compute.Operation] + ]]: + raise NotImplementedError() + + +__all__ = ( + 'ZoneOperationsTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/rest.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/rest.py new file mode 100644 index 000000000..1090859d3 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zone_operations/transports/rest.py @@ -0,0 +1,549 @@ +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: 
ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import path_template +from google.api_core import gapic_v1 +from requests import __version__ as requests_version +from typing import Callable, Dict, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + + +from google.cloud.compute_v1.types import compute + +from .base import ZoneOperationsTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + +class ZoneOperationsRestTransport(ZoneOperationsTransport): + """REST backend transport for ZoneOperations. + + The ZoneOperations API. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+ + It sends JSON representations of protocol buffers over HTTP/1.1 + """ + def __init__(self, *, + host: str = 'compute.googleapis.com', + credentials: ga_credentials.Credentials=None, + credentials_file: str=None, + scopes: Sequence[str]=None, + client_cert_source_for_mtls: Callable[[ + ], Tuple[bytes, bytes]]=None, + quota_project_id: Optional[str]=None, + client_info: gapic_v1.client_info.ClientInfo=DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool]=False, + url_scheme: str='https', + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client + certificate to configure mutual TLS HTTP channel. It is ignored + if ``channel`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + url_scheme: the protocol scheme for the API endpoint. Normally + "https", but for testing or local servers, + "http" can be specified. 
+ """ + # Run the base constructor + # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. + # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the + # credentials object + super().__init__( + host=host, + credentials=credentials, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + ) + self._session = AuthorizedSession( + self._credentials, default_host=self.DEFAULT_HOST) + if client_cert_source_for_mtls: + self._session.configure_mtls_channel(client_cert_source_for_mtls) + self._prep_wrapped_messages(client_info) + + def _delete(self, + request: compute.DeleteZoneOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.DeleteZoneOperationResponse: + r"""Call the delete method over HTTP. + + Args: + request (~.compute.DeleteZoneOperationRequest): + The request object. A request message for + ZoneOperations.Delete. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.DeleteZoneOperationResponse: + A response message for + ZoneOperations.Delete. See the method + description for details. 
+ + """ + + http_options = [ + { + 'method': 'delete', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/operations/{operation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.DeleteZoneOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.DeleteZoneOperationRequest.to_json( + compute.DeleteZoneOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.DeleteZoneOperationResponse.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _get(self, + request: compute.GetZoneOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the get method over HTTP. + + Args: + request (~.compute.GetZoneOperationRequest): + The request object. A request message for + ZoneOperations.Get. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global </compute/docs/reference/rest/v1/globalOperations>`__ + \* + `Regional </compute/docs/reference/rest/v1/regionOperations>`__ + \* + `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. + - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. 
+ + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/operations/{operation}', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.GetZoneOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.GetZoneOperationRequest.to_json( + compute.GetZoneOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _list(self, + request: compute.ListZoneOperationsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.OperationList: + r"""Call the list method over HTTP. + + Args: + request (~.compute.ListZoneOperationsRequest): + The request object. A request message for + ZoneOperations.List. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.OperationList: + Contains a list of Operation + resources. + + """ + + http_options = [ + { + 'method': 'get', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/operations', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.ListZoneOperationsRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.ListZoneOperationsRequest.to_json( + compute.ListZoneOperationsRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. 
+ orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.OperationList.from_json( + response.content, + ignore_unknown_fields=True + ) + + def _wait(self, + request: compute.WaitZoneOperationRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: float=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> compute.Operation: + r"""Call the wait method over HTTP. + + Args: + request (~.compute.WaitZoneOperationRequest): + The request object. A request message for + ZoneOperations.Wait. See the method + description for details. + + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.compute.Operation: + Represents an Operation resource. Google Compute Engine + has three Operation resources: \* + `Global `__ + \* + `Regional `__ + \* + `Zonal `__ + You can use an operation resource to manage asynchronous + API requests. For more information, read Handling API + responses. Operations can be global, regional or zonal. 
+ - For global operations, use the ``globalOperations`` + resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, + use the ``zonalOperations`` resource. For more + information, read Global, Regional, and Zonal Resources. + + """ + + http_options = [ + { + 'method': 'post', + 'uri': '/compute/v1/projects/{project}/zones/{zone}/operations/{operation}/wait', + }, + ] + + required_fields = [ + # (snake_case_name, camel_case_name) + ( + "operation", + "operation" + ), + ( + "project", + "project" + ), + ( + "zone", + "zone" + ), + ] + + request_kwargs = compute.WaitZoneOperationRequest.to_dict(request) + transcoded_request = path_template.transcode( + http_options, **request_kwargs) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(compute.WaitZoneOperationRequest.to_json( + compute.WaitZoneOperationRequest(transcoded_request['query_params']), + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # Ensure required fields have values in query_params. + # If a required field has a default value, it can get lost + # by the to_json call above. + orig_query_params = transcoded_request["query_params"] + for snake_case_name, camel_case_name in required_fields: + if snake_case_name in orig_query_params: + if camel_case_name not in query_params: + query_params[camel_case_name] = orig_query_params[snake_case_name] + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response=getattr(self._session, method)( + # Replace with proper schema configuration (http/https) logic + "https://{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + return compute.Operation.from_json( + response.content, + ignore_unknown_fields=True + ) + + @ property + def delete(self) -> Callable[ + [compute.DeleteZoneOperationRequest], + compute.DeleteZoneOperationResponse]: + return self._delete + @ property + def get(self) -> Callable[ + [compute.GetZoneOperationRequest], + compute.Operation]: + return self._get + @ property + def list(self) -> Callable[ + [compute.ListZoneOperationsRequest], + compute.OperationList]: + return self._list + @ property + def wait(self) -> Callable[ + [compute.WaitZoneOperationRequest], + compute.Operation]: + return self._wait + def close(self): + self._session.close() + + +__all__=( + 'ZoneOperationsRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/__init__.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/__init__.py new file mode 100644 index 000000000..de1bd13ed --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/__init__.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from .client import ZonesClient + +__all__ = ( + 'ZonesClient', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/client.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/client.py new file mode 100644 index 000000000..b744b8c1a --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/client.py @@ -0,0 +1,510 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Optional, Sequence, Tuple, Type, Union +import pkg_resources + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.compute_v1.services.zones import pagers +from google.cloud.compute_v1.types import compute +from .transports.base import 
ZonesTransport, DEFAULT_CLIENT_INFO
from .transports.rest import ZonesRestTransport


class ZonesClientMeta(type):
    """Metaclass for the Zones client.

    Keeps the registry of available transport classes so the client class
    can resolve a transport by label (or fall back to the first registered
    one) without carrying that machinery on client instances.
    """
    _transport_registry = OrderedDict()  # type: Dict[str, Type[ZonesTransport]]
    _transport_registry["rest"] = ZonesRestTransport

    def get_transport_class(cls,
            label: str = None,
            ) -> Type[ZonesTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: Name of the desired transport.  When falsy, the first
                transport in the registry is returned.

        Returns:
            The transport class to use.
        """
        if label:
            return cls._transport_registry[label]
        return next(iter(cls._transport_registry.values()))


class ZonesClient(metaclass=ZonesClientMeta):
    """The Zones API."""

    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert an API endpoint into its mTLS counterpart.

        ``*.googleapis.com`` becomes ``*.mtls.googleapis.com`` and
        ``*.sandbox.googleapis.com`` becomes
        ``*.mtls.sandbox.googleapis.com``.  Endpoints that are empty,
        already mTLS, or not on a ``googleapis.com`` domain are returned
        unchanged.
        """
        if not api_endpoint:
            return api_endpoint

        # NOTE(review): the ``(?P<...>`` group names below were
        # reconstructed from the tuple unpacking that follows; the rendered
        # source had them stripped.
        endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        name, mtls, sandbox, googledomain = endpoint_re.match(api_endpoint).groups()
        if mtls or not googledomain:
            return api_endpoint

        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")

    DEFAULT_ENDPOINT = "compute.googleapis.com"
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )

    @classmethod
    def from_service_account_info(cls, info: dict, *args, **kwargs):
        """Create a client from parsed service-account key info.

        Args:
            info (dict): The service account private key info.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ZonesClient: The constructed client.
        """
        kwargs["credentials"] = service_account.Credentials.from_service_account_info(info)
        return cls(*args, **kwargs)

    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Create a client from a service-account private key JSON file.

        Args:
            filename (str): Path to the service account key file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.

        Returns:
            ZonesClient: The constructed client.
        """
        kwargs["credentials"] = service_account.Credentials.from_service_account_file(filename)
        return cls(*args, **kwargs)

    from_service_account_json = from_service_account_file

    @property
    def transport(self) -> ZonesTransport:
        """The transport instance used by this client."""
        return self._transport

    @staticmethod
    def common_billing_account_path(billing_account: str, ) -> str:
        """Render a fully-qualified billing_account string."""
        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )

    @staticmethod
    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
        """Split a billing_account path into its component segments."""
        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_folder_path(folder: str, ) -> str:
        """Render a fully-qualified folder string."""
        return "folders/{folder}".format(folder=folder, )

    @staticmethod
    def parse_common_folder_path(path: str) -> Dict[str,str]:
        """Split a folder path into its component segments."""
        m = re.match(r"^folders/(?P<folder>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_organization_path(organization: str, ) -> str:
        """Render a fully-qualified organization string."""
        return "organizations/{organization}".format(organization=organization, )

    @staticmethod
    def parse_common_organization_path(path: str) -> Dict[str,str]:
        """Split an organization path into its component segments."""
        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_project_path(project: str, ) -> str:
        """Render a fully-qualified project string."""
        return "projects/{project}".format(project=project, )

    @staticmethod
    def parse_common_project_path(path: str) -> Dict[str,str]:
        """Split a project path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)$", path)
        return m.groupdict() if m else {}

    @staticmethod
    def common_location_path(project: str, location: str, ) -> str:
        """Render a fully-qualified location string."""
        return "projects/{project}/locations/{location}".format(project=project, location=location, )

    @staticmethod
    def parse_common_location_path(path: str) -> Dict[str,str]:
        """Split a location path into its component segments."""
        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
        return m.groupdict() if m else {}

    def __init__(self, *,
            credentials: Optional[ga_credentials.Credentials] = None,
            transport: Union[str, ZonesTransport, None] = None,
            client_options: Optional[client_options_lib.ClientOptions] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the zones client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]):
                Credentials identifying the application to the service; if
                ``None``, they are ascertained from the environment.
            transport (Union[str, ZonesTransport]): A transport instance or
                registry label.  ``None`` selects one automatically.
            client_options (google.api_core.client_options.ClientOptions):
                Custom client options, ignored when a ``transport``
                instance is provided.  ``api_endpoint`` overrides the
                default endpoint; the GOOGLE_API_USE_MTLS_ENDPOINT env var
                ("always"/"never"/"auto", default "auto") selects the mTLS
                endpoint, and GOOGLE_API_USE_CLIENT_CERTIFICATE
                ("true"/"false") controls whether a client certificate is
                used for mutual TLS.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used for the user-agent string.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS
                transport creation failed for any reason.
        """
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Decide whether a client certificate (mutual TLS) should be used.
        cert_env = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
        if cert_env not in ("true", "false"):
            raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`")
        use_client_cert = cert_env == "true"

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                client_cert_source_func = mtls.default_client_cert_source() if is_mtls else None

        # Pick the API endpoint: an explicit option wins, then the env var.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
                    "values: never, auto, always"
                )

        # Accept a ready-made transport, or build one from the label.
        if isinstance(transport, ZonesTransport):
            # A transport instance carries its own credentials and scopes.
            if credentials or client_options.credentials_file:
                raise ValueError("When providing a transport instance, "
                                 "provide its credentials directly.")
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, provide its scopes "
                    "directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
                always_use_jwt_access=True,
            )

    def get(self,
            request: Union[compute.GetZoneRequest, dict] = None,
            *,
            project: str = None,
            zone: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Zone:
        r"""Return the specified Zone resource.

        Args:
            request (Union[google.cloud.compute_v1.types.GetZoneRequest, dict]):
                The request message for Zones.Get.
            project (str): Project ID for this request; mutually exclusive
                with ``request``.
            zone (str): Name of the zone resource to return; mutually
                exclusive with ``request``.
            retry (google.api_core.retry.Retry): Designation of which
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                the request as metadata.

        Returns:
            google.cloud.compute_v1.types.Zone: The requested zone (a
            deployment area within a region).
        """
        # A full request object and flattened fields are mutually exclusive.
        if request is not None and any([project, zone]):
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        # Coerce dicts (and None) into a request object; an existing request
        # object is used as-is to avoid a copy.
        if not isinstance(request, compute.GetZoneRequest):
            request = compute.GetZoneRequest(request)
        # Apply any flattened keyword arguments onto the request.
        if project is not None:
            request.project = project
        if zone is not None:
            request.zone = zone

        # The wrapped method adds retry/timeout and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.get]
        return rpc(request, retry=retry, timeout=timeout, metadata=metadata)

    def list(self,
            request: Union[compute.ListZonesRequest, dict] = None,
            *,
            project: str = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListPager:
        r"""Retrieve the list of Zone resources available to a project.

        Args:
            request (Union[google.cloud.compute_v1.types.ListZonesRequest, dict]):
                The request message for Zones.List.
            project (str): Project ID for this request; mutually exclusive
                with ``request``.
            retry (google.api_core.retry.Retry): Designation of which
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                the request as metadata.

        Returns:
            google.cloud.compute_v1.services.zones.pagers.ListPager:
                Iterable of zones; additional pages are fetched on demand.
        """
        # A full request object and flattened fields are mutually exclusive.
        if request is not None and any([project]):
            raise ValueError('If the `request` argument is set, then none of '
                             'the individual field arguments should be set.')

        if not isinstance(request, compute.ListZonesRequest):
            request = compute.ListZonesRequest(request)
        if project is not None:
            request.project = project

        rpc = self._transport._wrapped_methods[self._transport.list]
        first_page = rpc(request, retry=retry, timeout=timeout, metadata=metadata)

        # Wrap the response so iteration resolves additional pages.
        return pagers.ListPager(
            method=rpc,
            request=request,
            response=first_page,
            metadata=metadata,
        )

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        """Release the underlying transport's resources.

        .. warning::
            Only use the client as a context manager when the transport is
            NOT shared with other clients; exiting the block closes it and
            may break those clients.
        """
        self.transport.close()


try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-compute",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


__all__ = (
    "ZonesClient",
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator

from google.cloud.compute_v1.types import compute


class ListPager:
    """Pager for Zones ``List`` responses.

    Wraps an initial :class:`google.cloud.compute_v1.types.ZoneList` and
    iterates its ``items`` field; when a ``next_page_token`` is present,
    further ``List`` calls are issued lazily and iteration continues
    across pages.  Attribute access falls through to the most recent
    response, so all the usual ``ZoneList`` attributes remain available.
    """
    def __init__(self,
            method: Callable[..., compute.ZoneList],
            request: compute.ListZonesRequest,
            response: compute.ZoneList,
            *,
            metadata: Sequence[Tuple[str, str]] = ()):
        """Instantiate the pager.

        Args:
            method (Callable): The originally-called list method, used to
                fetch subsequent pages.
            request (google.cloud.compute_v1.types.ListZonesRequest):
                The initial request object (copied so page tokens can be
                advanced without mutating the caller's request).
            response (google.cloud.compute_v1.types.ZoneList):
                The initial response object.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                each page request as metadata.
        """
        self._method = method
        self._request = compute.ListZonesRequest(request)
        self._response = response
        self._metadata = metadata

    def __getattr__(self, name: str) -> Any:
        # Delegate unknown attributes to the latest response page.
        return getattr(self._response, name)

    @property
    def pages(self) -> Iterator[compute.ZoneList]:
        """Yield each page, requesting the next one only when needed."""
        yield self._response
        while self._response.next_page_token:
            self._request.page_token = self._response.next_page_token
            self._response = self._method(self._request, metadata=self._metadata)
            yield self._response

    def __iter__(self) -> Iterator[compute.Zone]:
        for page in self.pages:
            yield from page.items

    def __repr__(self) -> str:
        return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from typing import Dict, Type

from .base import ZonesTransport
from .rest import ZonesRestTransport


# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[ZonesTransport]] +_transport_registry['rest'] = ZonesRestTransport + +__all__ = ( + 'ZonesTransport', + 'ZonesRestTransport', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/transports/base.py b/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/transports/base.py new file mode 100644 index 000000000..ef798a3a4 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/services/zones/transports/base.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources

import google.auth  # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials  # type: ignore
from google.oauth2 import service_account  # type: ignore

from google.cloud.compute_v1.types import compute

try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            'google-cloud-compute',
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()


class ZonesTransport(abc.ABC):
    """Abstract transport class for Zones."""

    AUTH_SCOPES = (
        'https://www.googleapis.com/auth/compute.readonly',
        'https://www.googleapis.com/auth/compute',
        'https://www.googleapis.com/auth/cloud-platform',
    )

    DEFAULT_HOST: str = 'compute.googleapis.com'

    def __init__(
            self, *,
            host: str = DEFAULT_HOST,
            credentials: ga_credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            **kwargs,
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Credentials identifying the application to the service; if
                ``None``, they are ascertained from the environment.
            credentials_file (Optional[str]): A credentials file loadable
                with :func:`google.auth.load_credentials_from_file`;
                mutually exclusive with ``credentials``.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): Project used for billing and
                quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used for the user-agent string.
            always_use_jwt_access (Optional[bool]): Whether a self-signed
                JWT should be used for service account credentials.
        """
        # Default to port 443 (HTTPS) when no port is given.
        if ':' not in host:
            host += ':443'
        self._host = host

        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}

        # Save the caller-supplied scopes (possibly None).
        self._scopes = scopes

        # Resolve credentials: explicit object, file, or the environment.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive")

        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file,
                **scopes_kwargs,
                quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id)

        # Service-account credentials prefer self-signed JWTs when allowed.
        if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"):
            credentials = credentials.with_always_use_jwt_access(True)

        self._credentials = credentials

    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods (adds retry/timeout/user-agent).
        self._wrapped_methods = {
            self.get: gapic_v1.method.wrap_method(
                self.get,
                default_timeout=None,
                client_info=client_info,
            ),
            self.list: gapic_v1.method.wrap_method(
                self.list,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    def close(self):
        """Release resources held by the transport.

        .. warning::
            Only call this if the transport is NOT shared with other
            clients - this may cause errors in other clients!
        """
        raise NotImplementedError()

    @property
    def get(self) -> Callable[
            [compute.GetZoneRequest],
            Union[
                compute.Zone,
                Awaitable[compute.Zone]
            ]]:
        raise NotImplementedError()

    @property
    def list(self) -> Callable[
            [compute.ListZonesRequest],
            Union[
                compute.ZoneList,
                Awaitable[compute.ZoneList]
            ]]:
        raise NotImplementedError()


__all__ = (
    'ZonesTransport',
)
from google.auth.transport.requests import AuthorizedSession  # type: ignore
import json  # type: ignore
import grpc  # type: ignore
from google.auth.transport.grpc import SslCredentials  # type: ignore
from google.auth import credentials as ga_credentials  # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings

try:
    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError:  # pragma: NO COVER
    OptionalRetry = Union[retries.Retry, object]  # type: ignore

# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#


from google.cloud.compute_v1.types import compute

from .base import ZonesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO


DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
    grpc_version=None,
    rest_version=requests_version,
)


class ZonesRestTransport(ZonesTransport):
    """REST backend transport for Zones.

    The Zones API.

    Defines the same methods as the primary client so the client can load
    this transport implementation and call it; sends JSON representations
    of protocol buffers over HTTP/1.1.
    """
    def __init__(self, *,
            host: str = 'compute.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            always_use_jwt_access: Optional[bool] = False,
            url_scheme: str = 'https',
            ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]):
                Credentials identifying the application to the service; if
                ``None``, they are ascertained from the environment.
            credentials_file (Optional[str]): A credentials file loadable
                with :func:`google.auth.load_credentials_from_file`;
                ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes; ignored if
                ``channel`` is provided.
            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]):
                Client certificate for mutual TLS; ignored if ``channel``
                is provided.
            quota_project_id (Optional[str]): Project used for billing and
                quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                Client info used for the user-agent string.
            always_use_jwt_access (Optional[bool]): Whether a self-signed
                JWT should be used for service account credentials.
            url_scheme: Protocol scheme for the API endpoint; normally
                "https", "http" for testing or local servers.
        """
        # Run the base constructor.
        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
        # TODO: When custom host (api_endpoint) is set, `scopes` must *also*
        # be set on the credentials object.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
            always_use_jwt_access=always_use_jwt_access,
        )
        self._session = AuthorizedSession(
            self._credentials, default_host=self.DEFAULT_HOST)
        if client_cert_source_for_mtls:
            self._session.configure_mtls_channel(client_cert_source_for_mtls)
        self._prep_wrapped_messages(client_info)

    def _get(self,
            request: compute.GetZoneRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.Zone:
        r"""Call the get method over HTTP.

        Args:
            request (~.compute.GetZoneRequest): A request message for
                Zones.Get.
            retry (google.api_core.retry.Retry): Designation of which
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                the request as metadata.

        Returns:
            ~.compute.Zone: The requested zone resource.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/zones/{zone}',
            },
        ]
        # (snake_case_name, camel_case_name) of required request fields.
        required_fields = [
            ("project", "project"),
            ("zone", "zone"),
        ]

        request_kwargs = compute.GetZoneRequest.to_dict(request)
        transcoded = path_template.transcode(http_options, **request_kwargs)
        uri = transcoded['uri']
        method = transcoded['method']

        # JSON-encode the query parameters.
        query_params = json.loads(compute.GetZoneRequest.to_json(
            compute.GetZoneRequest(transcoded['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # Required fields whose value equals the proto default can be
        # dropped by to_json above; restore them from the transcoded
        # request so the server still receives them.
        orig_query_params = transcoded["query_params"]
        for snake_name, camel_name in required_fields:
            if snake_name in orig_query_params and camel_name not in query_params:
                query_params[camel_name] = orig_query_params[snake_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        # TODO(review): scheme is hard-coded https here; url_scheme is
        # currently unused.
        response = getattr(self._session, method)(
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # Raise the matching core_exceptions.GoogleAPICallError subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return compute.Zone.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    def _list(self,
            request: compute.ListZonesRequest, *,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: float = None,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> compute.ZoneList:
        r"""Call the list method over HTTP.

        Args:
            request (~.compute.ListZonesRequest): A request message for
                Zones.List.
            retry (google.api_core.retry.Retry): Designation of which
                errors, if any, should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings sent along with
                the request as metadata.

        Returns:
            ~.compute.ZoneList: Contains a list of zone resources.
        """
        http_options = [
            {
                'method': 'get',
                'uri': '/compute/v1/projects/{project}/zones',
            },
        ]
        # (snake_case_name, camel_case_name) of required request fields.
        required_fields = [
            ("project", "project"),
        ]

        request_kwargs = compute.ListZonesRequest.to_dict(request)
        transcoded = path_template.transcode(http_options, **request_kwargs)
        uri = transcoded['uri']
        method = transcoded['method']

        # JSON-encode the query parameters.
        query_params = json.loads(compute.ListZonesRequest.to_json(
            compute.ListZonesRequest(transcoded['query_params']),
            including_default_value_fields=False,
            use_integers_for_enums=False,
        ))

        # Restore required fields dropped by to_json (default values).
        orig_query_params = transcoded["query_params"]
        for snake_name, camel_name in required_fields:
            if snake_name in orig_query_params and camel_name not in query_params:
                query_params[camel_name] = orig_query_params[snake_name]

        # Send the request.
        headers = dict(metadata)
        headers['Content-Type'] = 'application/json'
        response = getattr(self._session, method)(
            "https://{host}{uri}".format(host=self._host, uri=uri),
            timeout=timeout,
            headers=headers,
            params=rest_helpers.flatten_query_params(query_params),
        )

        # Raise the matching core_exceptions.GoogleAPICallError subclass.
        if response.status_code >= 400:
            raise core_exceptions.from_http_response(response)

        return compute.ZoneList.from_json(
            response.content,
            ignore_unknown_fields=True,
        )

    @property
    def get(self) -> Callable[
            [compute.GetZoneRequest],
            compute.Zone]:
        return self._get

    @property
    def list(self) -> Callable[
            [compute.ListZonesRequest],
            compute.ZoneList]:
        return self._list

    def close(self):
        self._session.close()


__all__ = (
    'ZonesRestTransport',
)
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .compute import ( + AbandonInstancesInstanceGroupManagerRequest, + AbandonInstancesRegionInstanceGroupManagerRequest, + AcceleratorConfig, + Accelerators, + AcceleratorType, + AcceleratorTypeAggregatedList, + AcceleratorTypeList, + AcceleratorTypesScopedList, + AccessConfig, + AddAccessConfigInstanceRequest, + AddAssociationFirewallPolicyRequest, + AddHealthCheckTargetPoolRequest, + AddInstancesInstanceGroupRequest, + AddInstanceTargetPoolRequest, + AddNodesNodeGroupRequest, + AddPeeringNetworkRequest, + AddResourcePoliciesDiskRequest, + AddResourcePoliciesInstanceRequest, + AddResourcePoliciesRegionDiskRequest, + Address, + AddressAggregatedList, + AddressesScopedList, + AddressList, + AddRuleFirewallPolicyRequest, + AddRuleSecurityPolicyRequest, + AddSignedUrlKeyBackendBucketRequest, + AddSignedUrlKeyBackendServiceRequest, + AdvancedMachineFeatures, + AggregatedListAcceleratorTypesRequest, + AggregatedListAddressesRequest, + AggregatedListAutoscalersRequest, + AggregatedListBackendServicesRequest, + AggregatedListDisksRequest, + AggregatedListDiskTypesRequest, + AggregatedListForwardingRulesRequest, + AggregatedListGlobalOperationsRequest, + AggregatedListHealthChecksRequest, + AggregatedListInstanceGroupManagersRequest, + AggregatedListInstanceGroupsRequest, + AggregatedListInstancesRequest, + AggregatedListInterconnectAttachmentsRequest, + AggregatedListMachineTypesRequest, + AggregatedListNetworkEndpointGroupsRequest, + AggregatedListNodeGroupsRequest, + AggregatedListNodeTemplatesRequest, + AggregatedListNodeTypesRequest, + 
AggregatedListPacketMirroringsRequest, + AggregatedListPublicDelegatedPrefixesRequest, + AggregatedListRegionCommitmentsRequest, + AggregatedListReservationsRequest, + AggregatedListResourcePoliciesRequest, + AggregatedListRoutersRequest, + AggregatedListServiceAttachmentsRequest, + AggregatedListSslCertificatesRequest, + AggregatedListSubnetworksRequest, + AggregatedListTargetHttpProxiesRequest, + AggregatedListTargetHttpsProxiesRequest, + AggregatedListTargetInstancesRequest, + AggregatedListTargetPoolsRequest, + AggregatedListTargetVpnGatewaysRequest, + AggregatedListUrlMapsRequest, + AggregatedListVpnGatewaysRequest, + AggregatedListVpnTunnelsRequest, + AliasIpRange, + AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk, + AllocationSpecificSKUAllocationReservedInstanceProperties, + AllocationSpecificSKUReservation, + Allowed, + ApplyUpdatesToInstancesInstanceGroupManagerRequest, + ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest, + AttachDiskInstanceRequest, + AttachedDisk, + AttachedDiskInitializeParams, + AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest, + AttachNetworkEndpointsNetworkEndpointGroupRequest, + AuditConfig, + AuditLogConfig, + AuthorizationLoggingOptions, + Autoscaler, + AutoscalerAggregatedList, + AutoscalerList, + AutoscalersScopedList, + AutoscalerStatusDetails, + AutoscalingPolicy, + AutoscalingPolicyCpuUtilization, + AutoscalingPolicyCustomMetricUtilization, + AutoscalingPolicyLoadBalancingUtilization, + AutoscalingPolicyScaleInControl, + AutoscalingPolicyScalingSchedule, + Backend, + BackendBucket, + BackendBucketCdnPolicy, + BackendBucketCdnPolicyBypassCacheOnRequestHeader, + BackendBucketCdnPolicyNegativeCachingPolicy, + BackendBucketList, + BackendService, + BackendServiceAggregatedList, + BackendServiceCdnPolicy, + BackendServiceCdnPolicyBypassCacheOnRequestHeader, + BackendServiceCdnPolicyNegativeCachingPolicy, + BackendServiceFailoverPolicy, + BackendServiceGroupHealth, + BackendServiceIAP, + 
BackendServiceList, + BackendServiceLogConfig, + BackendServiceReference, + BackendServicesScopedList, + Binding, + BulkInsertInstanceRequest, + BulkInsertInstanceResource, + BulkInsertInstanceResourcePerInstanceProperties, + BulkInsertRegionInstanceRequest, + CacheInvalidationRule, + CacheKeyPolicy, + CircuitBreakers, + CloneRulesFirewallPolicyRequest, + Commitment, + CommitmentAggregatedList, + CommitmentList, + CommitmentsScopedList, + Condition, + ConfidentialInstanceConfig, + ConnectionDraining, + ConsistentHashLoadBalancerSettings, + ConsistentHashLoadBalancerSettingsHttpCookie, + CorsPolicy, + CreateInstancesInstanceGroupManagerRequest, + CreateInstancesRegionInstanceGroupManagerRequest, + CreateSnapshotDiskRequest, + CreateSnapshotRegionDiskRequest, + CustomerEncryptionKey, + CustomerEncryptionKeyProtectedDisk, + Data, + DeleteAccessConfigInstanceRequest, + DeleteAddressRequest, + DeleteAutoscalerRequest, + DeleteBackendBucketRequest, + DeleteBackendServiceRequest, + DeleteDiskRequest, + DeleteExternalVpnGatewayRequest, + DeleteFirewallPolicyRequest, + DeleteFirewallRequest, + DeleteForwardingRuleRequest, + DeleteGlobalAddressRequest, + DeleteGlobalForwardingRuleRequest, + DeleteGlobalNetworkEndpointGroupRequest, + DeleteGlobalOperationRequest, + DeleteGlobalOperationResponse, + DeleteGlobalOrganizationOperationRequest, + DeleteGlobalOrganizationOperationResponse, + DeleteGlobalPublicDelegatedPrefixeRequest, + DeleteHealthCheckRequest, + DeleteImageRequest, + DeleteInstanceGroupManagerRequest, + DeleteInstanceGroupRequest, + DeleteInstanceRequest, + DeleteInstancesInstanceGroupManagerRequest, + DeleteInstancesRegionInstanceGroupManagerRequest, + DeleteInstanceTemplateRequest, + DeleteInterconnectAttachmentRequest, + DeleteInterconnectRequest, + DeleteLicenseRequest, + DeleteNetworkEndpointGroupRequest, + DeleteNetworkRequest, + DeleteNodeGroupRequest, + DeleteNodesNodeGroupRequest, + DeleteNodeTemplateRequest, + DeletePacketMirroringRequest, + 
DeletePerInstanceConfigsInstanceGroupManagerRequest, + DeletePerInstanceConfigsRegionInstanceGroupManagerRequest, + DeletePublicAdvertisedPrefixeRequest, + DeletePublicDelegatedPrefixeRequest, + DeleteRegionAutoscalerRequest, + DeleteRegionBackendServiceRequest, + DeleteRegionDiskRequest, + DeleteRegionHealthCheckRequest, + DeleteRegionHealthCheckServiceRequest, + DeleteRegionInstanceGroupManagerRequest, + DeleteRegionNetworkEndpointGroupRequest, + DeleteRegionNotificationEndpointRequest, + DeleteRegionOperationRequest, + DeleteRegionOperationResponse, + DeleteRegionSslCertificateRequest, + DeleteRegionTargetHttpProxyRequest, + DeleteRegionTargetHttpsProxyRequest, + DeleteRegionUrlMapRequest, + DeleteReservationRequest, + DeleteResourcePolicyRequest, + DeleteRouteRequest, + DeleteRouterRequest, + DeleteSecurityPolicyRequest, + DeleteServiceAttachmentRequest, + DeleteSignedUrlKeyBackendBucketRequest, + DeleteSignedUrlKeyBackendServiceRequest, + DeleteSnapshotRequest, + DeleteSslCertificateRequest, + DeleteSslPolicyRequest, + DeleteSubnetworkRequest, + DeleteTargetGrpcProxyRequest, + DeleteTargetHttpProxyRequest, + DeleteTargetHttpsProxyRequest, + DeleteTargetInstanceRequest, + DeleteTargetPoolRequest, + DeleteTargetSslProxyRequest, + DeleteTargetTcpProxyRequest, + DeleteTargetVpnGatewayRequest, + DeleteUrlMapRequest, + DeleteVpnGatewayRequest, + DeleteVpnTunnelRequest, + DeleteZoneOperationRequest, + DeleteZoneOperationResponse, + Denied, + DeprecateImageRequest, + DeprecationStatus, + DetachDiskInstanceRequest, + DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest, + DetachNetworkEndpointsNetworkEndpointGroupRequest, + DisableXpnHostProjectRequest, + DisableXpnResourceProjectRequest, + Disk, + DiskAggregatedList, + DiskInstantiationConfig, + DiskList, + DiskMoveRequest, + DisksAddResourcePoliciesRequest, + DisksRemoveResourcePoliciesRequest, + DisksResizeRequest, + DisksScopedList, + DiskType, + DiskTypeAggregatedList, + DiskTypeList, + DiskTypesScopedList, + 
DisplayDevice, + DistributionPolicy, + DistributionPolicyZoneConfiguration, + Duration, + EnableXpnHostProjectRequest, + EnableXpnResourceProjectRequest, + Error, + Errors, + ExchangedPeeringRoute, + ExchangedPeeringRoutesList, + ExpandIpCidrRangeSubnetworkRequest, + Expr, + ExternalVpnGateway, + ExternalVpnGatewayInterface, + ExternalVpnGatewayList, + FileContentBuffer, + Firewall, + FirewallList, + FirewallLogConfig, + FirewallPoliciesListAssociationsResponse, + FirewallPolicy, + FirewallPolicyAssociation, + FirewallPolicyList, + FirewallPolicyRule, + FirewallPolicyRuleMatcher, + FirewallPolicyRuleMatcherLayer4Config, + FixedOrPercent, + ForwardingRule, + ForwardingRuleAggregatedList, + ForwardingRuleList, + ForwardingRuleReference, + ForwardingRuleServiceDirectoryRegistration, + ForwardingRulesScopedList, + GetAcceleratorTypeRequest, + GetAddressRequest, + GetAssociationFirewallPolicyRequest, + GetAutoscalerRequest, + GetBackendBucketRequest, + GetBackendServiceRequest, + GetDiagnosticsInterconnectRequest, + GetDiskRequest, + GetDiskTypeRequest, + GetEffectiveFirewallsInstanceRequest, + GetEffectiveFirewallsNetworkRequest, + GetExternalVpnGatewayRequest, + GetFirewallPolicyRequest, + GetFirewallRequest, + GetForwardingRuleRequest, + GetFromFamilyImageRequest, + GetGlobalAddressRequest, + GetGlobalForwardingRuleRequest, + GetGlobalNetworkEndpointGroupRequest, + GetGlobalOperationRequest, + GetGlobalOrganizationOperationRequest, + GetGlobalPublicDelegatedPrefixeRequest, + GetGuestAttributesInstanceRequest, + GetHealthBackendServiceRequest, + GetHealthCheckRequest, + GetHealthRegionBackendServiceRequest, + GetHealthTargetPoolRequest, + GetIamPolicyDiskRequest, + GetIamPolicyFirewallPolicyRequest, + GetIamPolicyImageRequest, + GetIamPolicyInstanceRequest, + GetIamPolicyInstanceTemplateRequest, + GetIamPolicyLicenseRequest, + GetIamPolicyNodeGroupRequest, + GetIamPolicyNodeTemplateRequest, + GetIamPolicyRegionDiskRequest, + GetIamPolicyReservationRequest, + 
GetIamPolicyResourcePolicyRequest, + GetIamPolicyServiceAttachmentRequest, + GetIamPolicySnapshotRequest, + GetIamPolicySubnetworkRequest, + GetImageFamilyViewRequest, + GetImageRequest, + GetInstanceGroupManagerRequest, + GetInstanceGroupRequest, + GetInstanceRequest, + GetInstanceTemplateRequest, + GetInterconnectAttachmentRequest, + GetInterconnectLocationRequest, + GetInterconnectRequest, + GetLicenseCodeRequest, + GetLicenseRequest, + GetMachineTypeRequest, + GetNatMappingInfoRoutersRequest, + GetNetworkEndpointGroupRequest, + GetNetworkRequest, + GetNodeGroupRequest, + GetNodeTemplateRequest, + GetNodeTypeRequest, + GetPacketMirroringRequest, + GetProjectRequest, + GetPublicAdvertisedPrefixeRequest, + GetPublicDelegatedPrefixeRequest, + GetRegionAutoscalerRequest, + GetRegionBackendServiceRequest, + GetRegionCommitmentRequest, + GetRegionDiskRequest, + GetRegionDiskTypeRequest, + GetRegionHealthCheckRequest, + GetRegionHealthCheckServiceRequest, + GetRegionInstanceGroupManagerRequest, + GetRegionInstanceGroupRequest, + GetRegionNetworkEndpointGroupRequest, + GetRegionNotificationEndpointRequest, + GetRegionOperationRequest, + GetRegionRequest, + GetRegionSslCertificateRequest, + GetRegionTargetHttpProxyRequest, + GetRegionTargetHttpsProxyRequest, + GetRegionUrlMapRequest, + GetReservationRequest, + GetResourcePolicyRequest, + GetRouteRequest, + GetRouterRequest, + GetRouterStatusRouterRequest, + GetRuleFirewallPolicyRequest, + GetRuleSecurityPolicyRequest, + GetScreenshotInstanceRequest, + GetSecurityPolicyRequest, + GetSerialPortOutputInstanceRequest, + GetServiceAttachmentRequest, + GetShieldedInstanceIdentityInstanceRequest, + GetSnapshotRequest, + GetSslCertificateRequest, + GetSslPolicyRequest, + GetStatusVpnGatewayRequest, + GetSubnetworkRequest, + GetTargetGrpcProxyRequest, + GetTargetHttpProxyRequest, + GetTargetHttpsProxyRequest, + GetTargetInstanceRequest, + GetTargetPoolRequest, + GetTargetSslProxyRequest, + GetTargetTcpProxyRequest, + 
GetTargetVpnGatewayRequest, + GetUrlMapRequest, + GetVpnGatewayRequest, + GetVpnTunnelRequest, + GetXpnHostProjectRequest, + GetXpnResourcesProjectsRequest, + GetZoneOperationRequest, + GetZoneRequest, + GlobalNetworkEndpointGroupsAttachEndpointsRequest, + GlobalNetworkEndpointGroupsDetachEndpointsRequest, + GlobalOrganizationSetPolicyRequest, + GlobalSetLabelsRequest, + GlobalSetPolicyRequest, + GRPCHealthCheck, + GuestAttributes, + GuestAttributesEntry, + GuestAttributesValue, + GuestOsFeature, + HealthCheck, + HealthCheckList, + HealthCheckLogConfig, + HealthCheckReference, + HealthChecksAggregatedList, + HealthCheckService, + HealthCheckServiceReference, + HealthCheckServicesList, + HealthChecksScopedList, + HealthStatus, + HealthStatusForNetworkEndpoint, + HostRule, + HTTP2HealthCheck, + HttpFaultAbort, + HttpFaultDelay, + HttpFaultInjection, + HttpHeaderAction, + HttpHeaderMatch, + HttpHeaderOption, + HTTPHealthCheck, + HttpQueryParameterMatch, + HttpRedirectAction, + HttpRetryPolicy, + HttpRouteAction, + HttpRouteRule, + HttpRouteRuleMatch, + HTTPSHealthCheck, + Image, + ImageFamilyView, + ImageList, + InitialStateConfig, + InsertAddressRequest, + InsertAutoscalerRequest, + InsertBackendBucketRequest, + InsertBackendServiceRequest, + InsertDiskRequest, + InsertExternalVpnGatewayRequest, + InsertFirewallPolicyRequest, + InsertFirewallRequest, + InsertForwardingRuleRequest, + InsertGlobalAddressRequest, + InsertGlobalForwardingRuleRequest, + InsertGlobalNetworkEndpointGroupRequest, + InsertGlobalPublicDelegatedPrefixeRequest, + InsertHealthCheckRequest, + InsertImageRequest, + InsertInstanceGroupManagerRequest, + InsertInstanceGroupRequest, + InsertInstanceRequest, + InsertInstanceTemplateRequest, + InsertInterconnectAttachmentRequest, + InsertInterconnectRequest, + InsertLicenseRequest, + InsertNetworkEndpointGroupRequest, + InsertNetworkRequest, + InsertNodeGroupRequest, + InsertNodeTemplateRequest, + InsertPacketMirroringRequest, + 
InsertPublicAdvertisedPrefixeRequest, + InsertPublicDelegatedPrefixeRequest, + InsertRegionAutoscalerRequest, + InsertRegionBackendServiceRequest, + InsertRegionCommitmentRequest, + InsertRegionDiskRequest, + InsertRegionHealthCheckRequest, + InsertRegionHealthCheckServiceRequest, + InsertRegionInstanceGroupManagerRequest, + InsertRegionNetworkEndpointGroupRequest, + InsertRegionNotificationEndpointRequest, + InsertRegionSslCertificateRequest, + InsertRegionTargetHttpProxyRequest, + InsertRegionTargetHttpsProxyRequest, + InsertRegionUrlMapRequest, + InsertReservationRequest, + InsertResourcePolicyRequest, + InsertRouteRequest, + InsertRouterRequest, + InsertSecurityPolicyRequest, + InsertServiceAttachmentRequest, + InsertSslCertificateRequest, + InsertSslPolicyRequest, + InsertSubnetworkRequest, + InsertTargetGrpcProxyRequest, + InsertTargetHttpProxyRequest, + InsertTargetHttpsProxyRequest, + InsertTargetInstanceRequest, + InsertTargetPoolRequest, + InsertTargetSslProxyRequest, + InsertTargetTcpProxyRequest, + InsertTargetVpnGatewayRequest, + InsertUrlMapRequest, + InsertVpnGatewayRequest, + InsertVpnTunnelRequest, + Instance, + InstanceAggregatedList, + InstanceGroup, + InstanceGroupAggregatedList, + InstanceGroupList, + InstanceGroupManager, + InstanceGroupManagerActionsSummary, + InstanceGroupManagerAggregatedList, + InstanceGroupManagerAutoHealingPolicy, + InstanceGroupManagerList, + InstanceGroupManagersAbandonInstancesRequest, + InstanceGroupManagersApplyUpdatesRequest, + InstanceGroupManagersCreateInstancesRequest, + InstanceGroupManagersDeleteInstancesRequest, + InstanceGroupManagersDeletePerInstanceConfigsReq, + InstanceGroupManagersListErrorsResponse, + InstanceGroupManagersListManagedInstancesResponse, + InstanceGroupManagersListPerInstanceConfigsResp, + InstanceGroupManagersPatchPerInstanceConfigsReq, + InstanceGroupManagersRecreateInstancesRequest, + InstanceGroupManagersScopedList, + InstanceGroupManagersSetInstanceTemplateRequest, + 
InstanceGroupManagersSetTargetPoolsRequest, + InstanceGroupManagerStatus, + InstanceGroupManagerStatusStateful, + InstanceGroupManagerStatusStatefulPerInstanceConfigs, + InstanceGroupManagerStatusVersionTarget, + InstanceGroupManagersUpdatePerInstanceConfigsReq, + InstanceGroupManagerUpdatePolicy, + InstanceGroupManagerVersion, + InstanceGroupsAddInstancesRequest, + InstanceGroupsListInstances, + InstanceGroupsListInstancesRequest, + InstanceGroupsRemoveInstancesRequest, + InstanceGroupsScopedList, + InstanceGroupsSetNamedPortsRequest, + InstanceList, + InstanceListReferrers, + InstanceManagedByIgmError, + InstanceManagedByIgmErrorInstanceActionDetails, + InstanceManagedByIgmErrorManagedInstanceError, + InstanceMoveRequest, + InstanceProperties, + InstanceReference, + InstancesAddResourcePoliciesRequest, + InstancesGetEffectiveFirewallsResponse, + InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy, + InstancesRemoveResourcePoliciesRequest, + InstancesScopedList, + InstancesSetLabelsRequest, + InstancesSetMachineResourcesRequest, + InstancesSetMachineTypeRequest, + InstancesSetMinCpuPlatformRequest, + InstancesSetServiceAccountRequest, + InstancesStartWithEncryptionKeyRequest, + InstanceTemplate, + InstanceTemplateList, + InstanceWithNamedPorts, + Int64RangeMatch, + Interconnect, + InterconnectAttachment, + InterconnectAttachmentAggregatedList, + InterconnectAttachmentList, + InterconnectAttachmentPartnerMetadata, + InterconnectAttachmentPrivateInfo, + InterconnectAttachmentsScopedList, + InterconnectCircuitInfo, + InterconnectDiagnostics, + InterconnectDiagnosticsARPEntry, + InterconnectDiagnosticsLinkLACPStatus, + InterconnectDiagnosticsLinkOpticalPower, + InterconnectDiagnosticsLinkStatus, + InterconnectList, + InterconnectLocation, + InterconnectLocationList, + InterconnectLocationRegionInfo, + InterconnectOutageNotification, + InterconnectsGetDiagnosticsResponse, + InvalidateCacheUrlMapRequest, + Items, + License, + LicenseCode, + 
LicenseCodeLicenseAlias, + LicenseResourceCommitment, + LicenseResourceRequirements, + LicensesListResponse, + ListAcceleratorTypesRequest, + ListAddressesRequest, + ListAssociationsFirewallPolicyRequest, + ListAutoscalersRequest, + ListAvailableFeaturesSslPoliciesRequest, + ListBackendBucketsRequest, + ListBackendServicesRequest, + ListDisksRequest, + ListDiskTypesRequest, + ListErrorsInstanceGroupManagersRequest, + ListErrorsRegionInstanceGroupManagersRequest, + ListExternalVpnGatewaysRequest, + ListFirewallPoliciesRequest, + ListFirewallsRequest, + ListForwardingRulesRequest, + ListGlobalAddressesRequest, + ListGlobalForwardingRulesRequest, + ListGlobalNetworkEndpointGroupsRequest, + ListGlobalOperationsRequest, + ListGlobalOrganizationOperationsRequest, + ListGlobalPublicDelegatedPrefixesRequest, + ListHealthChecksRequest, + ListImagesRequest, + ListInstanceGroupManagersRequest, + ListInstanceGroupsRequest, + ListInstancesInstanceGroupsRequest, + ListInstancesRegionInstanceGroupsRequest, + ListInstancesRequest, + ListInstanceTemplatesRequest, + ListInterconnectAttachmentsRequest, + ListInterconnectLocationsRequest, + ListInterconnectsRequest, + ListLicensesRequest, + ListMachineTypesRequest, + ListManagedInstancesInstanceGroupManagersRequest, + ListManagedInstancesRegionInstanceGroupManagersRequest, + ListNetworkEndpointGroupsRequest, + ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest, + ListNetworkEndpointsNetworkEndpointGroupsRequest, + ListNetworksRequest, + ListNodeGroupsRequest, + ListNodesNodeGroupsRequest, + ListNodeTemplatesRequest, + ListNodeTypesRequest, + ListPacketMirroringsRequest, + ListPeeringRoutesNetworksRequest, + ListPerInstanceConfigsInstanceGroupManagersRequest, + ListPerInstanceConfigsRegionInstanceGroupManagersRequest, + ListPreconfiguredExpressionSetsSecurityPoliciesRequest, + ListPublicAdvertisedPrefixesRequest, + ListPublicDelegatedPrefixesRequest, + ListReferrersInstancesRequest, + ListRegionAutoscalersRequest, + 
ListRegionBackendServicesRequest, + ListRegionCommitmentsRequest, + ListRegionDisksRequest, + ListRegionDiskTypesRequest, + ListRegionHealthCheckServicesRequest, + ListRegionHealthChecksRequest, + ListRegionInstanceGroupManagersRequest, + ListRegionInstanceGroupsRequest, + ListRegionNetworkEndpointGroupsRequest, + ListRegionNotificationEndpointsRequest, + ListRegionOperationsRequest, + ListRegionsRequest, + ListRegionSslCertificatesRequest, + ListRegionTargetHttpProxiesRequest, + ListRegionTargetHttpsProxiesRequest, + ListRegionUrlMapsRequest, + ListReservationsRequest, + ListResourcePoliciesRequest, + ListRoutersRequest, + ListRoutesRequest, + ListSecurityPoliciesRequest, + ListServiceAttachmentsRequest, + ListSnapshotsRequest, + ListSslCertificatesRequest, + ListSslPoliciesRequest, + ListSubnetworksRequest, + ListTargetGrpcProxiesRequest, + ListTargetHttpProxiesRequest, + ListTargetHttpsProxiesRequest, + ListTargetInstancesRequest, + ListTargetPoolsRequest, + ListTargetSslProxiesRequest, + ListTargetTcpProxiesRequest, + ListTargetVpnGatewaysRequest, + ListUrlMapsRequest, + ListUsableSubnetworksRequest, + ListVpnGatewaysRequest, + ListVpnTunnelsRequest, + ListXpnHostsProjectsRequest, + ListZoneOperationsRequest, + ListZonesRequest, + LocalDisk, + LocationPolicy, + LocationPolicyLocation, + LogConfig, + LogConfigCloudAuditOptions, + LogConfigCounterOptions, + LogConfigCounterOptionsCustomField, + LogConfigDataAccessOptions, + MachineType, + MachineTypeAggregatedList, + MachineTypeList, + MachineTypesScopedList, + ManagedInstance, + ManagedInstanceInstanceHealth, + ManagedInstanceLastAttempt, + ManagedInstanceVersion, + Metadata, + MetadataFilter, + MetadataFilterLabelMatch, + MoveDiskProjectRequest, + MoveFirewallPolicyRequest, + MoveInstanceProjectRequest, + NamedPort, + Network, + NetworkEndpoint, + NetworkEndpointGroup, + NetworkEndpointGroupAggregatedList, + NetworkEndpointGroupAppEngine, + NetworkEndpointGroupCloudFunction, + NetworkEndpointGroupCloudRun, + 
NetworkEndpointGroupList, + NetworkEndpointGroupsAttachEndpointsRequest, + NetworkEndpointGroupsDetachEndpointsRequest, + NetworkEndpointGroupsListEndpointsRequest, + NetworkEndpointGroupsListNetworkEndpoints, + NetworkEndpointGroupsScopedList, + NetworkEndpointWithHealthStatus, + NetworkInterface, + NetworkList, + NetworkPeering, + NetworkRoutingConfig, + NetworksAddPeeringRequest, + NetworksGetEffectiveFirewallsResponse, + NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy, + NetworksRemovePeeringRequest, + NetworksUpdatePeeringRequest, + NodeGroup, + NodeGroupAggregatedList, + NodeGroupAutoscalingPolicy, + NodeGroupList, + NodeGroupMaintenanceWindow, + NodeGroupNode, + NodeGroupsAddNodesRequest, + NodeGroupsDeleteNodesRequest, + NodeGroupsListNodes, + NodeGroupsScopedList, + NodeGroupsSetNodeTemplateRequest, + NodeTemplate, + NodeTemplateAggregatedList, + NodeTemplateList, + NodeTemplateNodeTypeFlexibility, + NodeTemplatesScopedList, + NodeType, + NodeTypeAggregatedList, + NodeTypeList, + NodeTypesScopedList, + NotificationEndpoint, + NotificationEndpointGrpcSettings, + NotificationEndpointList, + Operation, + OperationAggregatedList, + OperationList, + OperationsScopedList, + OutlierDetection, + PacketMirroring, + PacketMirroringAggregatedList, + PacketMirroringFilter, + PacketMirroringForwardingRuleInfo, + PacketMirroringList, + PacketMirroringMirroredResourceInfo, + PacketMirroringMirroredResourceInfoInstanceInfo, + PacketMirroringMirroredResourceInfoSubnetInfo, + PacketMirroringNetworkInfo, + PacketMirroringsScopedList, + PatchAutoscalerRequest, + PatchBackendBucketRequest, + PatchBackendServiceRequest, + PatchFirewallPolicyRequest, + PatchFirewallRequest, + PatchForwardingRuleRequest, + PatchGlobalForwardingRuleRequest, + PatchGlobalPublicDelegatedPrefixeRequest, + PatchHealthCheckRequest, + PatchImageRequest, + PatchInstanceGroupManagerRequest, + PatchInterconnectAttachmentRequest, + PatchInterconnectRequest, + PatchNetworkRequest, + 
PatchNodeGroupRequest, + PatchPacketMirroringRequest, + PatchPerInstanceConfigsInstanceGroupManagerRequest, + PatchPerInstanceConfigsRegionInstanceGroupManagerRequest, + PatchPublicAdvertisedPrefixeRequest, + PatchPublicDelegatedPrefixeRequest, + PatchRegionAutoscalerRequest, + PatchRegionBackendServiceRequest, + PatchRegionHealthCheckRequest, + PatchRegionHealthCheckServiceRequest, + PatchRegionInstanceGroupManagerRequest, + PatchRegionUrlMapRequest, + PatchRouterRequest, + PatchRuleFirewallPolicyRequest, + PatchRuleSecurityPolicyRequest, + PatchSecurityPolicyRequest, + PatchServiceAttachmentRequest, + PatchSslPolicyRequest, + PatchSubnetworkRequest, + PatchTargetGrpcProxyRequest, + PatchTargetHttpProxyRequest, + PatchTargetHttpsProxyRequest, + PatchUrlMapRequest, + PathMatcher, + PathRule, + PerInstanceConfig, + Policy, + PreconfiguredWafSet, + PreservedState, + PreservedStatePreservedDisk, + PreviewRouterRequest, + Project, + ProjectsDisableXpnResourceRequest, + ProjectsEnableXpnResourceRequest, + ProjectsGetXpnResources, + ProjectsListXpnHostsRequest, + ProjectsSetDefaultNetworkTierRequest, + PublicAdvertisedPrefix, + PublicAdvertisedPrefixList, + PublicAdvertisedPrefixPublicDelegatedPrefix, + PublicDelegatedPrefix, + PublicDelegatedPrefixAggregatedList, + PublicDelegatedPrefixesScopedList, + PublicDelegatedPrefixList, + PublicDelegatedPrefixPublicDelegatedSubPrefix, + Quota, + RawDisk, + RecreateInstancesInstanceGroupManagerRequest, + RecreateInstancesRegionInstanceGroupManagerRequest, + Reference, + Region, + RegionAutoscalerList, + RegionDisksAddResourcePoliciesRequest, + RegionDisksRemoveResourcePoliciesRequest, + RegionDisksResizeRequest, + RegionDiskTypeList, + RegionInstanceGroupList, + RegionInstanceGroupManagerDeleteInstanceConfigReq, + RegionInstanceGroupManagerList, + RegionInstanceGroupManagerPatchInstanceConfigReq, + RegionInstanceGroupManagersAbandonInstancesRequest, + RegionInstanceGroupManagersApplyUpdatesRequest, + 
RegionInstanceGroupManagersCreateInstancesRequest, + RegionInstanceGroupManagersDeleteInstancesRequest, + RegionInstanceGroupManagersListErrorsResponse, + RegionInstanceGroupManagersListInstanceConfigsResp, + RegionInstanceGroupManagersListInstancesResponse, + RegionInstanceGroupManagersRecreateRequest, + RegionInstanceGroupManagersSetTargetPoolsRequest, + RegionInstanceGroupManagersSetTemplateRequest, + RegionInstanceGroupManagerUpdateInstanceConfigReq, + RegionInstanceGroupsListInstances, + RegionInstanceGroupsListInstancesRequest, + RegionInstanceGroupsSetNamedPortsRequest, + RegionList, + RegionSetLabelsRequest, + RegionSetPolicyRequest, + RegionTargetHttpsProxiesSetSslCertificatesRequest, + RegionUrlMapsValidateRequest, + RemoveAssociationFirewallPolicyRequest, + RemoveHealthCheckTargetPoolRequest, + RemoveInstancesInstanceGroupRequest, + RemoveInstanceTargetPoolRequest, + RemovePeeringNetworkRequest, + RemoveResourcePoliciesDiskRequest, + RemoveResourcePoliciesInstanceRequest, + RemoveResourcePoliciesRegionDiskRequest, + RemoveRuleFirewallPolicyRequest, + RemoveRuleSecurityPolicyRequest, + RequestMirrorPolicy, + Reservation, + ReservationAffinity, + ReservationAggregatedList, + ReservationList, + ReservationsResizeRequest, + ReservationsScopedList, + ResetInstanceRequest, + ResizeDiskRequest, + ResizeInstanceGroupManagerRequest, + ResizeRegionDiskRequest, + ResizeRegionInstanceGroupManagerRequest, + ResizeReservationRequest, + ResourceCommitment, + ResourceGroupReference, + ResourcePoliciesScopedList, + ResourcePolicy, + ResourcePolicyAggregatedList, + ResourcePolicyDailyCycle, + ResourcePolicyGroupPlacementPolicy, + ResourcePolicyHourlyCycle, + ResourcePolicyInstanceSchedulePolicy, + ResourcePolicyInstanceSchedulePolicySchedule, + ResourcePolicyList, + ResourcePolicyResourceStatus, + ResourcePolicyResourceStatusInstanceSchedulePolicyStatus, + ResourcePolicySnapshotSchedulePolicy, + ResourcePolicySnapshotSchedulePolicyRetentionPolicy, + 
ResourcePolicySnapshotSchedulePolicySchedule, + ResourcePolicySnapshotSchedulePolicySnapshotProperties, + ResourcePolicyWeeklyCycle, + ResourcePolicyWeeklyCycleDayOfWeek, + Route, + RouteAsPath, + RouteList, + Router, + RouterAdvertisedIpRange, + RouterAggregatedList, + RouterBgp, + RouterBgpPeer, + RouterBgpPeerBfd, + RouterInterface, + RouterList, + RouterNat, + RouterNatLogConfig, + RouterNatRule, + RouterNatRuleAction, + RouterNatSubnetworkToNat, + RoutersPreviewResponse, + RoutersScopedList, + RouterStatus, + RouterStatusBgpPeerStatus, + RouterStatusNatStatus, + RouterStatusNatStatusNatRuleStatus, + RouterStatusResponse, + Rule, + ScalingScheduleStatus, + Scheduling, + SchedulingNodeAffinity, + ScratchDisks, + Screenshot, + SecurityPoliciesListPreconfiguredExpressionSetsResponse, + SecurityPoliciesWafConfig, + SecurityPolicy, + SecurityPolicyAdaptiveProtectionConfig, + SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig, + SecurityPolicyAdvancedOptionsConfig, + SecurityPolicyList, + SecurityPolicyReference, + SecurityPolicyRule, + SecurityPolicyRuleMatcher, + SecurityPolicyRuleMatcherConfig, + SecuritySettings, + SendDiagnosticInterruptInstanceRequest, + SendDiagnosticInterruptInstanceResponse, + SerialPortOutput, + ServerBinding, + ServiceAccount, + ServiceAttachment, + ServiceAttachmentAggregatedList, + ServiceAttachmentConnectedEndpoint, + ServiceAttachmentConsumerProjectLimit, + ServiceAttachmentList, + ServiceAttachmentsScopedList, + SetBackendServiceTargetSslProxyRequest, + SetBackendServiceTargetTcpProxyRequest, + SetBackupTargetPoolRequest, + SetCommonInstanceMetadataProjectRequest, + SetDefaultNetworkTierProjectRequest, + SetDeletionProtectionInstanceRequest, + SetDiskAutoDeleteInstanceRequest, + SetIamPolicyDiskRequest, + SetIamPolicyFirewallPolicyRequest, + SetIamPolicyImageRequest, + SetIamPolicyInstanceRequest, + SetIamPolicyInstanceTemplateRequest, + SetIamPolicyLicenseRequest, + SetIamPolicyNodeGroupRequest, + 
SetIamPolicyNodeTemplateRequest, + SetIamPolicyRegionDiskRequest, + SetIamPolicyReservationRequest, + SetIamPolicyResourcePolicyRequest, + SetIamPolicyServiceAttachmentRequest, + SetIamPolicySnapshotRequest, + SetIamPolicySubnetworkRequest, + SetInstanceTemplateInstanceGroupManagerRequest, + SetInstanceTemplateRegionInstanceGroupManagerRequest, + SetLabelsDiskRequest, + SetLabelsExternalVpnGatewayRequest, + SetLabelsForwardingRuleRequest, + SetLabelsGlobalForwardingRuleRequest, + SetLabelsImageRequest, + SetLabelsInstanceRequest, + SetLabelsRegionDiskRequest, + SetLabelsSnapshotRequest, + SetLabelsVpnGatewayRequest, + SetMachineResourcesInstanceRequest, + SetMachineTypeInstanceRequest, + SetMetadataInstanceRequest, + SetMinCpuPlatformInstanceRequest, + SetNamedPortsInstanceGroupRequest, + SetNamedPortsRegionInstanceGroupRequest, + SetNodeTemplateNodeGroupRequest, + SetPrivateIpGoogleAccessSubnetworkRequest, + SetProxyHeaderTargetSslProxyRequest, + SetProxyHeaderTargetTcpProxyRequest, + SetQuicOverrideTargetHttpsProxyRequest, + SetSchedulingInstanceRequest, + SetSecurityPolicyBackendServiceRequest, + SetServiceAccountInstanceRequest, + SetShieldedInstanceIntegrityPolicyInstanceRequest, + SetSslCertificatesRegionTargetHttpsProxyRequest, + SetSslCertificatesTargetHttpsProxyRequest, + SetSslCertificatesTargetSslProxyRequest, + SetSslPolicyTargetHttpsProxyRequest, + SetSslPolicyTargetSslProxyRequest, + SetTagsInstanceRequest, + SetTargetForwardingRuleRequest, + SetTargetGlobalForwardingRuleRequest, + SetTargetPoolsInstanceGroupManagerRequest, + SetTargetPoolsRegionInstanceGroupManagerRequest, + SetUrlMapRegionTargetHttpProxyRequest, + SetUrlMapRegionTargetHttpsProxyRequest, + SetUrlMapTargetHttpProxyRequest, + SetUrlMapTargetHttpsProxyRequest, + SetUsageExportBucketProjectRequest, + ShieldedInstanceConfig, + ShieldedInstanceIdentity, + ShieldedInstanceIdentityEntry, + ShieldedInstanceIntegrityPolicy, + SignedUrlKey, + SimulateMaintenanceEventInstanceRequest, + Snapshot, 
+ SnapshotList, + SourceInstanceParams, + SslCertificate, + SslCertificateAggregatedList, + SslCertificateList, + SslCertificateManagedSslCertificate, + SslCertificateSelfManagedSslCertificate, + SslCertificatesScopedList, + SSLHealthCheck, + SslPoliciesList, + SslPoliciesListAvailableFeaturesResponse, + SslPolicy, + SslPolicyReference, + StartInstanceRequest, + StartWithEncryptionKeyInstanceRequest, + StatefulPolicy, + StatefulPolicyPreservedState, + StatefulPolicyPreservedStateDiskDevice, + StopInstanceRequest, + Subnetwork, + SubnetworkAggregatedList, + SubnetworkList, + SubnetworkLogConfig, + SubnetworkSecondaryRange, + SubnetworksExpandIpCidrRangeRequest, + SubnetworksScopedList, + SubnetworksSetPrivateIpGoogleAccessRequest, + Subsetting, + SwitchToCustomModeNetworkRequest, + Tags, + TargetGrpcProxy, + TargetGrpcProxyList, + TargetHttpProxiesScopedList, + TargetHttpProxy, + TargetHttpProxyAggregatedList, + TargetHttpProxyList, + TargetHttpsProxiesScopedList, + TargetHttpsProxiesSetQuicOverrideRequest, + TargetHttpsProxiesSetSslCertificatesRequest, + TargetHttpsProxy, + TargetHttpsProxyAggregatedList, + TargetHttpsProxyList, + TargetInstance, + TargetInstanceAggregatedList, + TargetInstanceList, + TargetInstancesScopedList, + TargetPool, + TargetPoolAggregatedList, + TargetPoolInstanceHealth, + TargetPoolList, + TargetPoolsAddHealthCheckRequest, + TargetPoolsAddInstanceRequest, + TargetPoolsRemoveHealthCheckRequest, + TargetPoolsRemoveInstanceRequest, + TargetPoolsScopedList, + TargetReference, + TargetSslProxiesSetBackendServiceRequest, + TargetSslProxiesSetProxyHeaderRequest, + TargetSslProxiesSetSslCertificatesRequest, + TargetSslProxy, + TargetSslProxyList, + TargetTcpProxiesSetBackendServiceRequest, + TargetTcpProxiesSetProxyHeaderRequest, + TargetTcpProxy, + TargetTcpProxyList, + TargetVpnGateway, + TargetVpnGatewayAggregatedList, + TargetVpnGatewayList, + TargetVpnGatewaysScopedList, + TCPHealthCheck, + TestFailure, + TestIamPermissionsDiskRequest, + 
TestIamPermissionsExternalVpnGatewayRequest, + TestIamPermissionsFirewallPolicyRequest, + TestIamPermissionsImageRequest, + TestIamPermissionsInstanceRequest, + TestIamPermissionsInstanceTemplateRequest, + TestIamPermissionsLicenseCodeRequest, + TestIamPermissionsLicenseRequest, + TestIamPermissionsNetworkEndpointGroupRequest, + TestIamPermissionsNodeGroupRequest, + TestIamPermissionsNodeTemplateRequest, + TestIamPermissionsPacketMirroringRequest, + TestIamPermissionsRegionDiskRequest, + TestIamPermissionsReservationRequest, + TestIamPermissionsResourcePolicyRequest, + TestIamPermissionsServiceAttachmentRequest, + TestIamPermissionsSnapshotRequest, + TestIamPermissionsSubnetworkRequest, + TestIamPermissionsVpnGatewayRequest, + TestPermissionsRequest, + TestPermissionsResponse, + Uint128, + UpdateAccessConfigInstanceRequest, + UpdateAutoscalerRequest, + UpdateBackendBucketRequest, + UpdateBackendServiceRequest, + UpdateDisplayDeviceInstanceRequest, + UpdateFirewallRequest, + UpdateHealthCheckRequest, + UpdateInstanceRequest, + UpdateNetworkInterfaceInstanceRequest, + UpdatePeeringNetworkRequest, + UpdatePerInstanceConfigsInstanceGroupManagerRequest, + UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest, + UpdateRegionAutoscalerRequest, + UpdateRegionBackendServiceRequest, + UpdateRegionHealthCheckRequest, + UpdateRegionUrlMapRequest, + UpdateRouterRequest, + UpdateShieldedInstanceConfigInstanceRequest, + UpdateUrlMapRequest, + UrlMap, + UrlMapList, + UrlMapReference, + UrlMapsAggregatedList, + UrlMapsScopedList, + UrlMapsValidateRequest, + UrlMapsValidateResponse, + UrlMapTest, + UrlMapTestHeader, + UrlMapValidationResult, + UrlRewrite, + UsableSubnetwork, + UsableSubnetworksAggregatedList, + UsableSubnetworkSecondaryRange, + UsageExportLocation, + ValidateRegionUrlMapRequest, + ValidateUrlMapRequest, + VmEndpointNatMappings, + VmEndpointNatMappingsInterfaceNatMappings, + VmEndpointNatMappingsList, + VpnGateway, + VpnGatewayAggregatedList, + VpnGatewayList, + 
VpnGatewaysGetStatusResponse, + VpnGatewaysScopedList, + VpnGatewayStatus, + VpnGatewayStatusHighAvailabilityRequirementState, + VpnGatewayStatusTunnel, + VpnGatewayStatusVpnConnection, + VpnGatewayVpnGatewayInterface, + VpnTunnel, + VpnTunnelAggregatedList, + VpnTunnelList, + VpnTunnelsScopedList, + WafExpressionSet, + WafExpressionSetExpression, + WaitGlobalOperationRequest, + WaitRegionOperationRequest, + WaitZoneOperationRequest, + Warning, + Warnings, + WeightedBackendService, + XpnHostList, + XpnResourceId, + Zone, + ZoneList, + ZoneSetLabelsRequest, + ZoneSetPolicyRequest, +) + +__all__ = ( + 'AbandonInstancesInstanceGroupManagerRequest', + 'AbandonInstancesRegionInstanceGroupManagerRequest', + 'AcceleratorConfig', + 'Accelerators', + 'AcceleratorType', + 'AcceleratorTypeAggregatedList', + 'AcceleratorTypeList', + 'AcceleratorTypesScopedList', + 'AccessConfig', + 'AddAccessConfigInstanceRequest', + 'AddAssociationFirewallPolicyRequest', + 'AddHealthCheckTargetPoolRequest', + 'AddInstancesInstanceGroupRequest', + 'AddInstanceTargetPoolRequest', + 'AddNodesNodeGroupRequest', + 'AddPeeringNetworkRequest', + 'AddResourcePoliciesDiskRequest', + 'AddResourcePoliciesInstanceRequest', + 'AddResourcePoliciesRegionDiskRequest', + 'Address', + 'AddressAggregatedList', + 'AddressesScopedList', + 'AddressList', + 'AddRuleFirewallPolicyRequest', + 'AddRuleSecurityPolicyRequest', + 'AddSignedUrlKeyBackendBucketRequest', + 'AddSignedUrlKeyBackendServiceRequest', + 'AdvancedMachineFeatures', + 'AggregatedListAcceleratorTypesRequest', + 'AggregatedListAddressesRequest', + 'AggregatedListAutoscalersRequest', + 'AggregatedListBackendServicesRequest', + 'AggregatedListDisksRequest', + 'AggregatedListDiskTypesRequest', + 'AggregatedListForwardingRulesRequest', + 'AggregatedListGlobalOperationsRequest', + 'AggregatedListHealthChecksRequest', + 'AggregatedListInstanceGroupManagersRequest', + 'AggregatedListInstanceGroupsRequest', + 'AggregatedListInstancesRequest', + 
'AggregatedListInterconnectAttachmentsRequest', + 'AggregatedListMachineTypesRequest', + 'AggregatedListNetworkEndpointGroupsRequest', + 'AggregatedListNodeGroupsRequest', + 'AggregatedListNodeTemplatesRequest', + 'AggregatedListNodeTypesRequest', + 'AggregatedListPacketMirroringsRequest', + 'AggregatedListPublicDelegatedPrefixesRequest', + 'AggregatedListRegionCommitmentsRequest', + 'AggregatedListReservationsRequest', + 'AggregatedListResourcePoliciesRequest', + 'AggregatedListRoutersRequest', + 'AggregatedListServiceAttachmentsRequest', + 'AggregatedListSslCertificatesRequest', + 'AggregatedListSubnetworksRequest', + 'AggregatedListTargetHttpProxiesRequest', + 'AggregatedListTargetHttpsProxiesRequest', + 'AggregatedListTargetInstancesRequest', + 'AggregatedListTargetPoolsRequest', + 'AggregatedListTargetVpnGatewaysRequest', + 'AggregatedListUrlMapsRequest', + 'AggregatedListVpnGatewaysRequest', + 'AggregatedListVpnTunnelsRequest', + 'AliasIpRange', + 'AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk', + 'AllocationSpecificSKUAllocationReservedInstanceProperties', + 'AllocationSpecificSKUReservation', + 'Allowed', + 'ApplyUpdatesToInstancesInstanceGroupManagerRequest', + 'ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest', + 'AttachDiskInstanceRequest', + 'AttachedDisk', + 'AttachedDiskInitializeParams', + 'AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest', + 'AttachNetworkEndpointsNetworkEndpointGroupRequest', + 'AuditConfig', + 'AuditLogConfig', + 'AuthorizationLoggingOptions', + 'Autoscaler', + 'AutoscalerAggregatedList', + 'AutoscalerList', + 'AutoscalersScopedList', + 'AutoscalerStatusDetails', + 'AutoscalingPolicy', + 'AutoscalingPolicyCpuUtilization', + 'AutoscalingPolicyCustomMetricUtilization', + 'AutoscalingPolicyLoadBalancingUtilization', + 'AutoscalingPolicyScaleInControl', + 'AutoscalingPolicyScalingSchedule', + 'Backend', + 'BackendBucket', + 'BackendBucketCdnPolicy', + 
'BackendBucketCdnPolicyBypassCacheOnRequestHeader', + 'BackendBucketCdnPolicyNegativeCachingPolicy', + 'BackendBucketList', + 'BackendService', + 'BackendServiceAggregatedList', + 'BackendServiceCdnPolicy', + 'BackendServiceCdnPolicyBypassCacheOnRequestHeader', + 'BackendServiceCdnPolicyNegativeCachingPolicy', + 'BackendServiceFailoverPolicy', + 'BackendServiceGroupHealth', + 'BackendServiceIAP', + 'BackendServiceList', + 'BackendServiceLogConfig', + 'BackendServiceReference', + 'BackendServicesScopedList', + 'Binding', + 'BulkInsertInstanceRequest', + 'BulkInsertInstanceResource', + 'BulkInsertInstanceResourcePerInstanceProperties', + 'BulkInsertRegionInstanceRequest', + 'CacheInvalidationRule', + 'CacheKeyPolicy', + 'CircuitBreakers', + 'CloneRulesFirewallPolicyRequest', + 'Commitment', + 'CommitmentAggregatedList', + 'CommitmentList', + 'CommitmentsScopedList', + 'Condition', + 'ConfidentialInstanceConfig', + 'ConnectionDraining', + 'ConsistentHashLoadBalancerSettings', + 'ConsistentHashLoadBalancerSettingsHttpCookie', + 'CorsPolicy', + 'CreateInstancesInstanceGroupManagerRequest', + 'CreateInstancesRegionInstanceGroupManagerRequest', + 'CreateSnapshotDiskRequest', + 'CreateSnapshotRegionDiskRequest', + 'CustomerEncryptionKey', + 'CustomerEncryptionKeyProtectedDisk', + 'Data', + 'DeleteAccessConfigInstanceRequest', + 'DeleteAddressRequest', + 'DeleteAutoscalerRequest', + 'DeleteBackendBucketRequest', + 'DeleteBackendServiceRequest', + 'DeleteDiskRequest', + 'DeleteExternalVpnGatewayRequest', + 'DeleteFirewallPolicyRequest', + 'DeleteFirewallRequest', + 'DeleteForwardingRuleRequest', + 'DeleteGlobalAddressRequest', + 'DeleteGlobalForwardingRuleRequest', + 'DeleteGlobalNetworkEndpointGroupRequest', + 'DeleteGlobalOperationRequest', + 'DeleteGlobalOperationResponse', + 'DeleteGlobalOrganizationOperationRequest', + 'DeleteGlobalOrganizationOperationResponse', + 'DeleteGlobalPublicDelegatedPrefixeRequest', + 'DeleteHealthCheckRequest', + 'DeleteImageRequest', + 
'DeleteInstanceGroupManagerRequest', + 'DeleteInstanceGroupRequest', + 'DeleteInstanceRequest', + 'DeleteInstancesInstanceGroupManagerRequest', + 'DeleteInstancesRegionInstanceGroupManagerRequest', + 'DeleteInstanceTemplateRequest', + 'DeleteInterconnectAttachmentRequest', + 'DeleteInterconnectRequest', + 'DeleteLicenseRequest', + 'DeleteNetworkEndpointGroupRequest', + 'DeleteNetworkRequest', + 'DeleteNodeGroupRequest', + 'DeleteNodesNodeGroupRequest', + 'DeleteNodeTemplateRequest', + 'DeletePacketMirroringRequest', + 'DeletePerInstanceConfigsInstanceGroupManagerRequest', + 'DeletePerInstanceConfigsRegionInstanceGroupManagerRequest', + 'DeletePublicAdvertisedPrefixeRequest', + 'DeletePublicDelegatedPrefixeRequest', + 'DeleteRegionAutoscalerRequest', + 'DeleteRegionBackendServiceRequest', + 'DeleteRegionDiskRequest', + 'DeleteRegionHealthCheckRequest', + 'DeleteRegionHealthCheckServiceRequest', + 'DeleteRegionInstanceGroupManagerRequest', + 'DeleteRegionNetworkEndpointGroupRequest', + 'DeleteRegionNotificationEndpointRequest', + 'DeleteRegionOperationRequest', + 'DeleteRegionOperationResponse', + 'DeleteRegionSslCertificateRequest', + 'DeleteRegionTargetHttpProxyRequest', + 'DeleteRegionTargetHttpsProxyRequest', + 'DeleteRegionUrlMapRequest', + 'DeleteReservationRequest', + 'DeleteResourcePolicyRequest', + 'DeleteRouteRequest', + 'DeleteRouterRequest', + 'DeleteSecurityPolicyRequest', + 'DeleteServiceAttachmentRequest', + 'DeleteSignedUrlKeyBackendBucketRequest', + 'DeleteSignedUrlKeyBackendServiceRequest', + 'DeleteSnapshotRequest', + 'DeleteSslCertificateRequest', + 'DeleteSslPolicyRequest', + 'DeleteSubnetworkRequest', + 'DeleteTargetGrpcProxyRequest', + 'DeleteTargetHttpProxyRequest', + 'DeleteTargetHttpsProxyRequest', + 'DeleteTargetInstanceRequest', + 'DeleteTargetPoolRequest', + 'DeleteTargetSslProxyRequest', + 'DeleteTargetTcpProxyRequest', + 'DeleteTargetVpnGatewayRequest', + 'DeleteUrlMapRequest', + 'DeleteVpnGatewayRequest', + 'DeleteVpnTunnelRequest', + 
'DeleteZoneOperationRequest', + 'DeleteZoneOperationResponse', + 'Denied', + 'DeprecateImageRequest', + 'DeprecationStatus', + 'DetachDiskInstanceRequest', + 'DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest', + 'DetachNetworkEndpointsNetworkEndpointGroupRequest', + 'DisableXpnHostProjectRequest', + 'DisableXpnResourceProjectRequest', + 'Disk', + 'DiskAggregatedList', + 'DiskInstantiationConfig', + 'DiskList', + 'DiskMoveRequest', + 'DisksAddResourcePoliciesRequest', + 'DisksRemoveResourcePoliciesRequest', + 'DisksResizeRequest', + 'DisksScopedList', + 'DiskType', + 'DiskTypeAggregatedList', + 'DiskTypeList', + 'DiskTypesScopedList', + 'DisplayDevice', + 'DistributionPolicy', + 'DistributionPolicyZoneConfiguration', + 'Duration', + 'EnableXpnHostProjectRequest', + 'EnableXpnResourceProjectRequest', + 'Error', + 'Errors', + 'ExchangedPeeringRoute', + 'ExchangedPeeringRoutesList', + 'ExpandIpCidrRangeSubnetworkRequest', + 'Expr', + 'ExternalVpnGateway', + 'ExternalVpnGatewayInterface', + 'ExternalVpnGatewayList', + 'FileContentBuffer', + 'Firewall', + 'FirewallList', + 'FirewallLogConfig', + 'FirewallPoliciesListAssociationsResponse', + 'FirewallPolicy', + 'FirewallPolicyAssociation', + 'FirewallPolicyList', + 'FirewallPolicyRule', + 'FirewallPolicyRuleMatcher', + 'FirewallPolicyRuleMatcherLayer4Config', + 'FixedOrPercent', + 'ForwardingRule', + 'ForwardingRuleAggregatedList', + 'ForwardingRuleList', + 'ForwardingRuleReference', + 'ForwardingRuleServiceDirectoryRegistration', + 'ForwardingRulesScopedList', + 'GetAcceleratorTypeRequest', + 'GetAddressRequest', + 'GetAssociationFirewallPolicyRequest', + 'GetAutoscalerRequest', + 'GetBackendBucketRequest', + 'GetBackendServiceRequest', + 'GetDiagnosticsInterconnectRequest', + 'GetDiskRequest', + 'GetDiskTypeRequest', + 'GetEffectiveFirewallsInstanceRequest', + 'GetEffectiveFirewallsNetworkRequest', + 'GetExternalVpnGatewayRequest', + 'GetFirewallPolicyRequest', + 'GetFirewallRequest', + 
'GetForwardingRuleRequest', + 'GetFromFamilyImageRequest', + 'GetGlobalAddressRequest', + 'GetGlobalForwardingRuleRequest', + 'GetGlobalNetworkEndpointGroupRequest', + 'GetGlobalOperationRequest', + 'GetGlobalOrganizationOperationRequest', + 'GetGlobalPublicDelegatedPrefixeRequest', + 'GetGuestAttributesInstanceRequest', + 'GetHealthBackendServiceRequest', + 'GetHealthCheckRequest', + 'GetHealthRegionBackendServiceRequest', + 'GetHealthTargetPoolRequest', + 'GetIamPolicyDiskRequest', + 'GetIamPolicyFirewallPolicyRequest', + 'GetIamPolicyImageRequest', + 'GetIamPolicyInstanceRequest', + 'GetIamPolicyInstanceTemplateRequest', + 'GetIamPolicyLicenseRequest', + 'GetIamPolicyNodeGroupRequest', + 'GetIamPolicyNodeTemplateRequest', + 'GetIamPolicyRegionDiskRequest', + 'GetIamPolicyReservationRequest', + 'GetIamPolicyResourcePolicyRequest', + 'GetIamPolicyServiceAttachmentRequest', + 'GetIamPolicySnapshotRequest', + 'GetIamPolicySubnetworkRequest', + 'GetImageFamilyViewRequest', + 'GetImageRequest', + 'GetInstanceGroupManagerRequest', + 'GetInstanceGroupRequest', + 'GetInstanceRequest', + 'GetInstanceTemplateRequest', + 'GetInterconnectAttachmentRequest', + 'GetInterconnectLocationRequest', + 'GetInterconnectRequest', + 'GetLicenseCodeRequest', + 'GetLicenseRequest', + 'GetMachineTypeRequest', + 'GetNatMappingInfoRoutersRequest', + 'GetNetworkEndpointGroupRequest', + 'GetNetworkRequest', + 'GetNodeGroupRequest', + 'GetNodeTemplateRequest', + 'GetNodeTypeRequest', + 'GetPacketMirroringRequest', + 'GetProjectRequest', + 'GetPublicAdvertisedPrefixeRequest', + 'GetPublicDelegatedPrefixeRequest', + 'GetRegionAutoscalerRequest', + 'GetRegionBackendServiceRequest', + 'GetRegionCommitmentRequest', + 'GetRegionDiskRequest', + 'GetRegionDiskTypeRequest', + 'GetRegionHealthCheckRequest', + 'GetRegionHealthCheckServiceRequest', + 'GetRegionInstanceGroupManagerRequest', + 'GetRegionInstanceGroupRequest', + 'GetRegionNetworkEndpointGroupRequest', + 
'GetRegionNotificationEndpointRequest', + 'GetRegionOperationRequest', + 'GetRegionRequest', + 'GetRegionSslCertificateRequest', + 'GetRegionTargetHttpProxyRequest', + 'GetRegionTargetHttpsProxyRequest', + 'GetRegionUrlMapRequest', + 'GetReservationRequest', + 'GetResourcePolicyRequest', + 'GetRouteRequest', + 'GetRouterRequest', + 'GetRouterStatusRouterRequest', + 'GetRuleFirewallPolicyRequest', + 'GetRuleSecurityPolicyRequest', + 'GetScreenshotInstanceRequest', + 'GetSecurityPolicyRequest', + 'GetSerialPortOutputInstanceRequest', + 'GetServiceAttachmentRequest', + 'GetShieldedInstanceIdentityInstanceRequest', + 'GetSnapshotRequest', + 'GetSslCertificateRequest', + 'GetSslPolicyRequest', + 'GetStatusVpnGatewayRequest', + 'GetSubnetworkRequest', + 'GetTargetGrpcProxyRequest', + 'GetTargetHttpProxyRequest', + 'GetTargetHttpsProxyRequest', + 'GetTargetInstanceRequest', + 'GetTargetPoolRequest', + 'GetTargetSslProxyRequest', + 'GetTargetTcpProxyRequest', + 'GetTargetVpnGatewayRequest', + 'GetUrlMapRequest', + 'GetVpnGatewayRequest', + 'GetVpnTunnelRequest', + 'GetXpnHostProjectRequest', + 'GetXpnResourcesProjectsRequest', + 'GetZoneOperationRequest', + 'GetZoneRequest', + 'GlobalNetworkEndpointGroupsAttachEndpointsRequest', + 'GlobalNetworkEndpointGroupsDetachEndpointsRequest', + 'GlobalOrganizationSetPolicyRequest', + 'GlobalSetLabelsRequest', + 'GlobalSetPolicyRequest', + 'GRPCHealthCheck', + 'GuestAttributes', + 'GuestAttributesEntry', + 'GuestAttributesValue', + 'GuestOsFeature', + 'HealthCheck', + 'HealthCheckList', + 'HealthCheckLogConfig', + 'HealthCheckReference', + 'HealthChecksAggregatedList', + 'HealthCheckService', + 'HealthCheckServiceReference', + 'HealthCheckServicesList', + 'HealthChecksScopedList', + 'HealthStatus', + 'HealthStatusForNetworkEndpoint', + 'HostRule', + 'HTTP2HealthCheck', + 'HttpFaultAbort', + 'HttpFaultDelay', + 'HttpFaultInjection', + 'HttpHeaderAction', + 'HttpHeaderMatch', + 'HttpHeaderOption', + 'HTTPHealthCheck', + 
'HttpQueryParameterMatch', + 'HttpRedirectAction', + 'HttpRetryPolicy', + 'HttpRouteAction', + 'HttpRouteRule', + 'HttpRouteRuleMatch', + 'HTTPSHealthCheck', + 'Image', + 'ImageFamilyView', + 'ImageList', + 'InitialStateConfig', + 'InsertAddressRequest', + 'InsertAutoscalerRequest', + 'InsertBackendBucketRequest', + 'InsertBackendServiceRequest', + 'InsertDiskRequest', + 'InsertExternalVpnGatewayRequest', + 'InsertFirewallPolicyRequest', + 'InsertFirewallRequest', + 'InsertForwardingRuleRequest', + 'InsertGlobalAddressRequest', + 'InsertGlobalForwardingRuleRequest', + 'InsertGlobalNetworkEndpointGroupRequest', + 'InsertGlobalPublicDelegatedPrefixeRequest', + 'InsertHealthCheckRequest', + 'InsertImageRequest', + 'InsertInstanceGroupManagerRequest', + 'InsertInstanceGroupRequest', + 'InsertInstanceRequest', + 'InsertInstanceTemplateRequest', + 'InsertInterconnectAttachmentRequest', + 'InsertInterconnectRequest', + 'InsertLicenseRequest', + 'InsertNetworkEndpointGroupRequest', + 'InsertNetworkRequest', + 'InsertNodeGroupRequest', + 'InsertNodeTemplateRequest', + 'InsertPacketMirroringRequest', + 'InsertPublicAdvertisedPrefixeRequest', + 'InsertPublicDelegatedPrefixeRequest', + 'InsertRegionAutoscalerRequest', + 'InsertRegionBackendServiceRequest', + 'InsertRegionCommitmentRequest', + 'InsertRegionDiskRequest', + 'InsertRegionHealthCheckRequest', + 'InsertRegionHealthCheckServiceRequest', + 'InsertRegionInstanceGroupManagerRequest', + 'InsertRegionNetworkEndpointGroupRequest', + 'InsertRegionNotificationEndpointRequest', + 'InsertRegionSslCertificateRequest', + 'InsertRegionTargetHttpProxyRequest', + 'InsertRegionTargetHttpsProxyRequest', + 'InsertRegionUrlMapRequest', + 'InsertReservationRequest', + 'InsertResourcePolicyRequest', + 'InsertRouteRequest', + 'InsertRouterRequest', + 'InsertSecurityPolicyRequest', + 'InsertServiceAttachmentRequest', + 'InsertSslCertificateRequest', + 'InsertSslPolicyRequest', + 'InsertSubnetworkRequest', + 'InsertTargetGrpcProxyRequest', 
+ 'InsertTargetHttpProxyRequest', + 'InsertTargetHttpsProxyRequest', + 'InsertTargetInstanceRequest', + 'InsertTargetPoolRequest', + 'InsertTargetSslProxyRequest', + 'InsertTargetTcpProxyRequest', + 'InsertTargetVpnGatewayRequest', + 'InsertUrlMapRequest', + 'InsertVpnGatewayRequest', + 'InsertVpnTunnelRequest', + 'Instance', + 'InstanceAggregatedList', + 'InstanceGroup', + 'InstanceGroupAggregatedList', + 'InstanceGroupList', + 'InstanceGroupManager', + 'InstanceGroupManagerActionsSummary', + 'InstanceGroupManagerAggregatedList', + 'InstanceGroupManagerAutoHealingPolicy', + 'InstanceGroupManagerList', + 'InstanceGroupManagersAbandonInstancesRequest', + 'InstanceGroupManagersApplyUpdatesRequest', + 'InstanceGroupManagersCreateInstancesRequest', + 'InstanceGroupManagersDeleteInstancesRequest', + 'InstanceGroupManagersDeletePerInstanceConfigsReq', + 'InstanceGroupManagersListErrorsResponse', + 'InstanceGroupManagersListManagedInstancesResponse', + 'InstanceGroupManagersListPerInstanceConfigsResp', + 'InstanceGroupManagersPatchPerInstanceConfigsReq', + 'InstanceGroupManagersRecreateInstancesRequest', + 'InstanceGroupManagersScopedList', + 'InstanceGroupManagersSetInstanceTemplateRequest', + 'InstanceGroupManagersSetTargetPoolsRequest', + 'InstanceGroupManagerStatus', + 'InstanceGroupManagerStatusStateful', + 'InstanceGroupManagerStatusStatefulPerInstanceConfigs', + 'InstanceGroupManagerStatusVersionTarget', + 'InstanceGroupManagersUpdatePerInstanceConfigsReq', + 'InstanceGroupManagerUpdatePolicy', + 'InstanceGroupManagerVersion', + 'InstanceGroupsAddInstancesRequest', + 'InstanceGroupsListInstances', + 'InstanceGroupsListInstancesRequest', + 'InstanceGroupsRemoveInstancesRequest', + 'InstanceGroupsScopedList', + 'InstanceGroupsSetNamedPortsRequest', + 'InstanceList', + 'InstanceListReferrers', + 'InstanceManagedByIgmError', + 'InstanceManagedByIgmErrorInstanceActionDetails', + 'InstanceManagedByIgmErrorManagedInstanceError', + 'InstanceMoveRequest', + 
'InstanceProperties', + 'InstanceReference', + 'InstancesAddResourcePoliciesRequest', + 'InstancesGetEffectiveFirewallsResponse', + 'InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + 'InstancesRemoveResourcePoliciesRequest', + 'InstancesScopedList', + 'InstancesSetLabelsRequest', + 'InstancesSetMachineResourcesRequest', + 'InstancesSetMachineTypeRequest', + 'InstancesSetMinCpuPlatformRequest', + 'InstancesSetServiceAccountRequest', + 'InstancesStartWithEncryptionKeyRequest', + 'InstanceTemplate', + 'InstanceTemplateList', + 'InstanceWithNamedPorts', + 'Int64RangeMatch', + 'Interconnect', + 'InterconnectAttachment', + 'InterconnectAttachmentAggregatedList', + 'InterconnectAttachmentList', + 'InterconnectAttachmentPartnerMetadata', + 'InterconnectAttachmentPrivateInfo', + 'InterconnectAttachmentsScopedList', + 'InterconnectCircuitInfo', + 'InterconnectDiagnostics', + 'InterconnectDiagnosticsARPEntry', + 'InterconnectDiagnosticsLinkLACPStatus', + 'InterconnectDiagnosticsLinkOpticalPower', + 'InterconnectDiagnosticsLinkStatus', + 'InterconnectList', + 'InterconnectLocation', + 'InterconnectLocationList', + 'InterconnectLocationRegionInfo', + 'InterconnectOutageNotification', + 'InterconnectsGetDiagnosticsResponse', + 'InvalidateCacheUrlMapRequest', + 'Items', + 'License', + 'LicenseCode', + 'LicenseCodeLicenseAlias', + 'LicenseResourceCommitment', + 'LicenseResourceRequirements', + 'LicensesListResponse', + 'ListAcceleratorTypesRequest', + 'ListAddressesRequest', + 'ListAssociationsFirewallPolicyRequest', + 'ListAutoscalersRequest', + 'ListAvailableFeaturesSslPoliciesRequest', + 'ListBackendBucketsRequest', + 'ListBackendServicesRequest', + 'ListDisksRequest', + 'ListDiskTypesRequest', + 'ListErrorsInstanceGroupManagersRequest', + 'ListErrorsRegionInstanceGroupManagersRequest', + 'ListExternalVpnGatewaysRequest', + 'ListFirewallPoliciesRequest', + 'ListFirewallsRequest', + 'ListForwardingRulesRequest', + 'ListGlobalAddressesRequest', + 
'ListGlobalForwardingRulesRequest', + 'ListGlobalNetworkEndpointGroupsRequest', + 'ListGlobalOperationsRequest', + 'ListGlobalOrganizationOperationsRequest', + 'ListGlobalPublicDelegatedPrefixesRequest', + 'ListHealthChecksRequest', + 'ListImagesRequest', + 'ListInstanceGroupManagersRequest', + 'ListInstanceGroupsRequest', + 'ListInstancesInstanceGroupsRequest', + 'ListInstancesRegionInstanceGroupsRequest', + 'ListInstancesRequest', + 'ListInstanceTemplatesRequest', + 'ListInterconnectAttachmentsRequest', + 'ListInterconnectLocationsRequest', + 'ListInterconnectsRequest', + 'ListLicensesRequest', + 'ListMachineTypesRequest', + 'ListManagedInstancesInstanceGroupManagersRequest', + 'ListManagedInstancesRegionInstanceGroupManagersRequest', + 'ListNetworkEndpointGroupsRequest', + 'ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest', + 'ListNetworkEndpointsNetworkEndpointGroupsRequest', + 'ListNetworksRequest', + 'ListNodeGroupsRequest', + 'ListNodesNodeGroupsRequest', + 'ListNodeTemplatesRequest', + 'ListNodeTypesRequest', + 'ListPacketMirroringsRequest', + 'ListPeeringRoutesNetworksRequest', + 'ListPerInstanceConfigsInstanceGroupManagersRequest', + 'ListPerInstanceConfigsRegionInstanceGroupManagersRequest', + 'ListPreconfiguredExpressionSetsSecurityPoliciesRequest', + 'ListPublicAdvertisedPrefixesRequest', + 'ListPublicDelegatedPrefixesRequest', + 'ListReferrersInstancesRequest', + 'ListRegionAutoscalersRequest', + 'ListRegionBackendServicesRequest', + 'ListRegionCommitmentsRequest', + 'ListRegionDisksRequest', + 'ListRegionDiskTypesRequest', + 'ListRegionHealthCheckServicesRequest', + 'ListRegionHealthChecksRequest', + 'ListRegionInstanceGroupManagersRequest', + 'ListRegionInstanceGroupsRequest', + 'ListRegionNetworkEndpointGroupsRequest', + 'ListRegionNotificationEndpointsRequest', + 'ListRegionOperationsRequest', + 'ListRegionsRequest', + 'ListRegionSslCertificatesRequest', + 'ListRegionTargetHttpProxiesRequest', + 'ListRegionTargetHttpsProxiesRequest', + 
'ListRegionUrlMapsRequest', + 'ListReservationsRequest', + 'ListResourcePoliciesRequest', + 'ListRoutersRequest', + 'ListRoutesRequest', + 'ListSecurityPoliciesRequest', + 'ListServiceAttachmentsRequest', + 'ListSnapshotsRequest', + 'ListSslCertificatesRequest', + 'ListSslPoliciesRequest', + 'ListSubnetworksRequest', + 'ListTargetGrpcProxiesRequest', + 'ListTargetHttpProxiesRequest', + 'ListTargetHttpsProxiesRequest', + 'ListTargetInstancesRequest', + 'ListTargetPoolsRequest', + 'ListTargetSslProxiesRequest', + 'ListTargetTcpProxiesRequest', + 'ListTargetVpnGatewaysRequest', + 'ListUrlMapsRequest', + 'ListUsableSubnetworksRequest', + 'ListVpnGatewaysRequest', + 'ListVpnTunnelsRequest', + 'ListXpnHostsProjectsRequest', + 'ListZoneOperationsRequest', + 'ListZonesRequest', + 'LocalDisk', + 'LocationPolicy', + 'LocationPolicyLocation', + 'LogConfig', + 'LogConfigCloudAuditOptions', + 'LogConfigCounterOptions', + 'LogConfigCounterOptionsCustomField', + 'LogConfigDataAccessOptions', + 'MachineType', + 'MachineTypeAggregatedList', + 'MachineTypeList', + 'MachineTypesScopedList', + 'ManagedInstance', + 'ManagedInstanceInstanceHealth', + 'ManagedInstanceLastAttempt', + 'ManagedInstanceVersion', + 'Metadata', + 'MetadataFilter', + 'MetadataFilterLabelMatch', + 'MoveDiskProjectRequest', + 'MoveFirewallPolicyRequest', + 'MoveInstanceProjectRequest', + 'NamedPort', + 'Network', + 'NetworkEndpoint', + 'NetworkEndpointGroup', + 'NetworkEndpointGroupAggregatedList', + 'NetworkEndpointGroupAppEngine', + 'NetworkEndpointGroupCloudFunction', + 'NetworkEndpointGroupCloudRun', + 'NetworkEndpointGroupList', + 'NetworkEndpointGroupsAttachEndpointsRequest', + 'NetworkEndpointGroupsDetachEndpointsRequest', + 'NetworkEndpointGroupsListEndpointsRequest', + 'NetworkEndpointGroupsListNetworkEndpoints', + 'NetworkEndpointGroupsScopedList', + 'NetworkEndpointWithHealthStatus', + 'NetworkInterface', + 'NetworkList', + 'NetworkPeering', + 'NetworkRoutingConfig', + 'NetworksAddPeeringRequest', + 
'NetworksGetEffectiveFirewallsResponse', + 'NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + 'NetworksRemovePeeringRequest', + 'NetworksUpdatePeeringRequest', + 'NodeGroup', + 'NodeGroupAggregatedList', + 'NodeGroupAutoscalingPolicy', + 'NodeGroupList', + 'NodeGroupMaintenanceWindow', + 'NodeGroupNode', + 'NodeGroupsAddNodesRequest', + 'NodeGroupsDeleteNodesRequest', + 'NodeGroupsListNodes', + 'NodeGroupsScopedList', + 'NodeGroupsSetNodeTemplateRequest', + 'NodeTemplate', + 'NodeTemplateAggregatedList', + 'NodeTemplateList', + 'NodeTemplateNodeTypeFlexibility', + 'NodeTemplatesScopedList', + 'NodeType', + 'NodeTypeAggregatedList', + 'NodeTypeList', + 'NodeTypesScopedList', + 'NotificationEndpoint', + 'NotificationEndpointGrpcSettings', + 'NotificationEndpointList', + 'Operation', + 'OperationAggregatedList', + 'OperationList', + 'OperationsScopedList', + 'OutlierDetection', + 'PacketMirroring', + 'PacketMirroringAggregatedList', + 'PacketMirroringFilter', + 'PacketMirroringForwardingRuleInfo', + 'PacketMirroringList', + 'PacketMirroringMirroredResourceInfo', + 'PacketMirroringMirroredResourceInfoInstanceInfo', + 'PacketMirroringMirroredResourceInfoSubnetInfo', + 'PacketMirroringNetworkInfo', + 'PacketMirroringsScopedList', + 'PatchAutoscalerRequest', + 'PatchBackendBucketRequest', + 'PatchBackendServiceRequest', + 'PatchFirewallPolicyRequest', + 'PatchFirewallRequest', + 'PatchForwardingRuleRequest', + 'PatchGlobalForwardingRuleRequest', + 'PatchGlobalPublicDelegatedPrefixeRequest', + 'PatchHealthCheckRequest', + 'PatchImageRequest', + 'PatchInstanceGroupManagerRequest', + 'PatchInterconnectAttachmentRequest', + 'PatchInterconnectRequest', + 'PatchNetworkRequest', + 'PatchNodeGroupRequest', + 'PatchPacketMirroringRequest', + 'PatchPerInstanceConfigsInstanceGroupManagerRequest', + 'PatchPerInstanceConfigsRegionInstanceGroupManagerRequest', + 'PatchPublicAdvertisedPrefixeRequest', + 'PatchPublicDelegatedPrefixeRequest', + 
'PatchRegionAutoscalerRequest', + 'PatchRegionBackendServiceRequest', + 'PatchRegionHealthCheckRequest', + 'PatchRegionHealthCheckServiceRequest', + 'PatchRegionInstanceGroupManagerRequest', + 'PatchRegionUrlMapRequest', + 'PatchRouterRequest', + 'PatchRuleFirewallPolicyRequest', + 'PatchRuleSecurityPolicyRequest', + 'PatchSecurityPolicyRequest', + 'PatchServiceAttachmentRequest', + 'PatchSslPolicyRequest', + 'PatchSubnetworkRequest', + 'PatchTargetGrpcProxyRequest', + 'PatchTargetHttpProxyRequest', + 'PatchTargetHttpsProxyRequest', + 'PatchUrlMapRequest', + 'PathMatcher', + 'PathRule', + 'PerInstanceConfig', + 'Policy', + 'PreconfiguredWafSet', + 'PreservedState', + 'PreservedStatePreservedDisk', + 'PreviewRouterRequest', + 'Project', + 'ProjectsDisableXpnResourceRequest', + 'ProjectsEnableXpnResourceRequest', + 'ProjectsGetXpnResources', + 'ProjectsListXpnHostsRequest', + 'ProjectsSetDefaultNetworkTierRequest', + 'PublicAdvertisedPrefix', + 'PublicAdvertisedPrefixList', + 'PublicAdvertisedPrefixPublicDelegatedPrefix', + 'PublicDelegatedPrefix', + 'PublicDelegatedPrefixAggregatedList', + 'PublicDelegatedPrefixesScopedList', + 'PublicDelegatedPrefixList', + 'PublicDelegatedPrefixPublicDelegatedSubPrefix', + 'Quota', + 'RawDisk', + 'RecreateInstancesInstanceGroupManagerRequest', + 'RecreateInstancesRegionInstanceGroupManagerRequest', + 'Reference', + 'Region', + 'RegionAutoscalerList', + 'RegionDisksAddResourcePoliciesRequest', + 'RegionDisksRemoveResourcePoliciesRequest', + 'RegionDisksResizeRequest', + 'RegionDiskTypeList', + 'RegionInstanceGroupList', + 'RegionInstanceGroupManagerDeleteInstanceConfigReq', + 'RegionInstanceGroupManagerList', + 'RegionInstanceGroupManagerPatchInstanceConfigReq', + 'RegionInstanceGroupManagersAbandonInstancesRequest', + 'RegionInstanceGroupManagersApplyUpdatesRequest', + 'RegionInstanceGroupManagersCreateInstancesRequest', + 'RegionInstanceGroupManagersDeleteInstancesRequest', + 'RegionInstanceGroupManagersListErrorsResponse', + 
'RegionInstanceGroupManagersListInstanceConfigsResp', + 'RegionInstanceGroupManagersListInstancesResponse', + 'RegionInstanceGroupManagersRecreateRequest', + 'RegionInstanceGroupManagersSetTargetPoolsRequest', + 'RegionInstanceGroupManagersSetTemplateRequest', + 'RegionInstanceGroupManagerUpdateInstanceConfigReq', + 'RegionInstanceGroupsListInstances', + 'RegionInstanceGroupsListInstancesRequest', + 'RegionInstanceGroupsSetNamedPortsRequest', + 'RegionList', + 'RegionSetLabelsRequest', + 'RegionSetPolicyRequest', + 'RegionTargetHttpsProxiesSetSslCertificatesRequest', + 'RegionUrlMapsValidateRequest', + 'RemoveAssociationFirewallPolicyRequest', + 'RemoveHealthCheckTargetPoolRequest', + 'RemoveInstancesInstanceGroupRequest', + 'RemoveInstanceTargetPoolRequest', + 'RemovePeeringNetworkRequest', + 'RemoveResourcePoliciesDiskRequest', + 'RemoveResourcePoliciesInstanceRequest', + 'RemoveResourcePoliciesRegionDiskRequest', + 'RemoveRuleFirewallPolicyRequest', + 'RemoveRuleSecurityPolicyRequest', + 'RequestMirrorPolicy', + 'Reservation', + 'ReservationAffinity', + 'ReservationAggregatedList', + 'ReservationList', + 'ReservationsResizeRequest', + 'ReservationsScopedList', + 'ResetInstanceRequest', + 'ResizeDiskRequest', + 'ResizeInstanceGroupManagerRequest', + 'ResizeRegionDiskRequest', + 'ResizeRegionInstanceGroupManagerRequest', + 'ResizeReservationRequest', + 'ResourceCommitment', + 'ResourceGroupReference', + 'ResourcePoliciesScopedList', + 'ResourcePolicy', + 'ResourcePolicyAggregatedList', + 'ResourcePolicyDailyCycle', + 'ResourcePolicyGroupPlacementPolicy', + 'ResourcePolicyHourlyCycle', + 'ResourcePolicyInstanceSchedulePolicy', + 'ResourcePolicyInstanceSchedulePolicySchedule', + 'ResourcePolicyList', + 'ResourcePolicyResourceStatus', + 'ResourcePolicyResourceStatusInstanceSchedulePolicyStatus', + 'ResourcePolicySnapshotSchedulePolicy', + 'ResourcePolicySnapshotSchedulePolicyRetentionPolicy', + 'ResourcePolicySnapshotSchedulePolicySchedule', + 
'ResourcePolicySnapshotSchedulePolicySnapshotProperties', + 'ResourcePolicyWeeklyCycle', + 'ResourcePolicyWeeklyCycleDayOfWeek', + 'Route', + 'RouteAsPath', + 'RouteList', + 'Router', + 'RouterAdvertisedIpRange', + 'RouterAggregatedList', + 'RouterBgp', + 'RouterBgpPeer', + 'RouterBgpPeerBfd', + 'RouterInterface', + 'RouterList', + 'RouterNat', + 'RouterNatLogConfig', + 'RouterNatRule', + 'RouterNatRuleAction', + 'RouterNatSubnetworkToNat', + 'RoutersPreviewResponse', + 'RoutersScopedList', + 'RouterStatus', + 'RouterStatusBgpPeerStatus', + 'RouterStatusNatStatus', + 'RouterStatusNatStatusNatRuleStatus', + 'RouterStatusResponse', + 'Rule', + 'ScalingScheduleStatus', + 'Scheduling', + 'SchedulingNodeAffinity', + 'ScratchDisks', + 'Screenshot', + 'SecurityPoliciesListPreconfiguredExpressionSetsResponse', + 'SecurityPoliciesWafConfig', + 'SecurityPolicy', + 'SecurityPolicyAdaptiveProtectionConfig', + 'SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig', + 'SecurityPolicyAdvancedOptionsConfig', + 'SecurityPolicyList', + 'SecurityPolicyReference', + 'SecurityPolicyRule', + 'SecurityPolicyRuleMatcher', + 'SecurityPolicyRuleMatcherConfig', + 'SecuritySettings', + 'SendDiagnosticInterruptInstanceRequest', + 'SendDiagnosticInterruptInstanceResponse', + 'SerialPortOutput', + 'ServerBinding', + 'ServiceAccount', + 'ServiceAttachment', + 'ServiceAttachmentAggregatedList', + 'ServiceAttachmentConnectedEndpoint', + 'ServiceAttachmentConsumerProjectLimit', + 'ServiceAttachmentList', + 'ServiceAttachmentsScopedList', + 'SetBackendServiceTargetSslProxyRequest', + 'SetBackendServiceTargetTcpProxyRequest', + 'SetBackupTargetPoolRequest', + 'SetCommonInstanceMetadataProjectRequest', + 'SetDefaultNetworkTierProjectRequest', + 'SetDeletionProtectionInstanceRequest', + 'SetDiskAutoDeleteInstanceRequest', + 'SetIamPolicyDiskRequest', + 'SetIamPolicyFirewallPolicyRequest', + 'SetIamPolicyImageRequest', + 'SetIamPolicyInstanceRequest', + 'SetIamPolicyInstanceTemplateRequest', + 
'SetIamPolicyLicenseRequest', + 'SetIamPolicyNodeGroupRequest', + 'SetIamPolicyNodeTemplateRequest', + 'SetIamPolicyRegionDiskRequest', + 'SetIamPolicyReservationRequest', + 'SetIamPolicyResourcePolicyRequest', + 'SetIamPolicyServiceAttachmentRequest', + 'SetIamPolicySnapshotRequest', + 'SetIamPolicySubnetworkRequest', + 'SetInstanceTemplateInstanceGroupManagerRequest', + 'SetInstanceTemplateRegionInstanceGroupManagerRequest', + 'SetLabelsDiskRequest', + 'SetLabelsExternalVpnGatewayRequest', + 'SetLabelsForwardingRuleRequest', + 'SetLabelsGlobalForwardingRuleRequest', + 'SetLabelsImageRequest', + 'SetLabelsInstanceRequest', + 'SetLabelsRegionDiskRequest', + 'SetLabelsSnapshotRequest', + 'SetLabelsVpnGatewayRequest', + 'SetMachineResourcesInstanceRequest', + 'SetMachineTypeInstanceRequest', + 'SetMetadataInstanceRequest', + 'SetMinCpuPlatformInstanceRequest', + 'SetNamedPortsInstanceGroupRequest', + 'SetNamedPortsRegionInstanceGroupRequest', + 'SetNodeTemplateNodeGroupRequest', + 'SetPrivateIpGoogleAccessSubnetworkRequest', + 'SetProxyHeaderTargetSslProxyRequest', + 'SetProxyHeaderTargetTcpProxyRequest', + 'SetQuicOverrideTargetHttpsProxyRequest', + 'SetSchedulingInstanceRequest', + 'SetSecurityPolicyBackendServiceRequest', + 'SetServiceAccountInstanceRequest', + 'SetShieldedInstanceIntegrityPolicyInstanceRequest', + 'SetSslCertificatesRegionTargetHttpsProxyRequest', + 'SetSslCertificatesTargetHttpsProxyRequest', + 'SetSslCertificatesTargetSslProxyRequest', + 'SetSslPolicyTargetHttpsProxyRequest', + 'SetSslPolicyTargetSslProxyRequest', + 'SetTagsInstanceRequest', + 'SetTargetForwardingRuleRequest', + 'SetTargetGlobalForwardingRuleRequest', + 'SetTargetPoolsInstanceGroupManagerRequest', + 'SetTargetPoolsRegionInstanceGroupManagerRequest', + 'SetUrlMapRegionTargetHttpProxyRequest', + 'SetUrlMapRegionTargetHttpsProxyRequest', + 'SetUrlMapTargetHttpProxyRequest', + 'SetUrlMapTargetHttpsProxyRequest', + 'SetUsageExportBucketProjectRequest', + 'ShieldedInstanceConfig', + 
'ShieldedInstanceIdentity', + 'ShieldedInstanceIdentityEntry', + 'ShieldedInstanceIntegrityPolicy', + 'SignedUrlKey', + 'SimulateMaintenanceEventInstanceRequest', + 'Snapshot', + 'SnapshotList', + 'SourceInstanceParams', + 'SslCertificate', + 'SslCertificateAggregatedList', + 'SslCertificateList', + 'SslCertificateManagedSslCertificate', + 'SslCertificateSelfManagedSslCertificate', + 'SslCertificatesScopedList', + 'SSLHealthCheck', + 'SslPoliciesList', + 'SslPoliciesListAvailableFeaturesResponse', + 'SslPolicy', + 'SslPolicyReference', + 'StartInstanceRequest', + 'StartWithEncryptionKeyInstanceRequest', + 'StatefulPolicy', + 'StatefulPolicyPreservedState', + 'StatefulPolicyPreservedStateDiskDevice', + 'StopInstanceRequest', + 'Subnetwork', + 'SubnetworkAggregatedList', + 'SubnetworkList', + 'SubnetworkLogConfig', + 'SubnetworkSecondaryRange', + 'SubnetworksExpandIpCidrRangeRequest', + 'SubnetworksScopedList', + 'SubnetworksSetPrivateIpGoogleAccessRequest', + 'Subsetting', + 'SwitchToCustomModeNetworkRequest', + 'Tags', + 'TargetGrpcProxy', + 'TargetGrpcProxyList', + 'TargetHttpProxiesScopedList', + 'TargetHttpProxy', + 'TargetHttpProxyAggregatedList', + 'TargetHttpProxyList', + 'TargetHttpsProxiesScopedList', + 'TargetHttpsProxiesSetQuicOverrideRequest', + 'TargetHttpsProxiesSetSslCertificatesRequest', + 'TargetHttpsProxy', + 'TargetHttpsProxyAggregatedList', + 'TargetHttpsProxyList', + 'TargetInstance', + 'TargetInstanceAggregatedList', + 'TargetInstanceList', + 'TargetInstancesScopedList', + 'TargetPool', + 'TargetPoolAggregatedList', + 'TargetPoolInstanceHealth', + 'TargetPoolList', + 'TargetPoolsAddHealthCheckRequest', + 'TargetPoolsAddInstanceRequest', + 'TargetPoolsRemoveHealthCheckRequest', + 'TargetPoolsRemoveInstanceRequest', + 'TargetPoolsScopedList', + 'TargetReference', + 'TargetSslProxiesSetBackendServiceRequest', + 'TargetSslProxiesSetProxyHeaderRequest', + 'TargetSslProxiesSetSslCertificatesRequest', + 'TargetSslProxy', + 'TargetSslProxyList', + 
'TargetTcpProxiesSetBackendServiceRequest', + 'TargetTcpProxiesSetProxyHeaderRequest', + 'TargetTcpProxy', + 'TargetTcpProxyList', + 'TargetVpnGateway', + 'TargetVpnGatewayAggregatedList', + 'TargetVpnGatewayList', + 'TargetVpnGatewaysScopedList', + 'TCPHealthCheck', + 'TestFailure', + 'TestIamPermissionsDiskRequest', + 'TestIamPermissionsExternalVpnGatewayRequest', + 'TestIamPermissionsFirewallPolicyRequest', + 'TestIamPermissionsImageRequest', + 'TestIamPermissionsInstanceRequest', + 'TestIamPermissionsInstanceTemplateRequest', + 'TestIamPermissionsLicenseCodeRequest', + 'TestIamPermissionsLicenseRequest', + 'TestIamPermissionsNetworkEndpointGroupRequest', + 'TestIamPermissionsNodeGroupRequest', + 'TestIamPermissionsNodeTemplateRequest', + 'TestIamPermissionsPacketMirroringRequest', + 'TestIamPermissionsRegionDiskRequest', + 'TestIamPermissionsReservationRequest', + 'TestIamPermissionsResourcePolicyRequest', + 'TestIamPermissionsServiceAttachmentRequest', + 'TestIamPermissionsSnapshotRequest', + 'TestIamPermissionsSubnetworkRequest', + 'TestIamPermissionsVpnGatewayRequest', + 'TestPermissionsRequest', + 'TestPermissionsResponse', + 'Uint128', + 'UpdateAccessConfigInstanceRequest', + 'UpdateAutoscalerRequest', + 'UpdateBackendBucketRequest', + 'UpdateBackendServiceRequest', + 'UpdateDisplayDeviceInstanceRequest', + 'UpdateFirewallRequest', + 'UpdateHealthCheckRequest', + 'UpdateInstanceRequest', + 'UpdateNetworkInterfaceInstanceRequest', + 'UpdatePeeringNetworkRequest', + 'UpdatePerInstanceConfigsInstanceGroupManagerRequest', + 'UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest', + 'UpdateRegionAutoscalerRequest', + 'UpdateRegionBackendServiceRequest', + 'UpdateRegionHealthCheckRequest', + 'UpdateRegionUrlMapRequest', + 'UpdateRouterRequest', + 'UpdateShieldedInstanceConfigInstanceRequest', + 'UpdateUrlMapRequest', + 'UrlMap', + 'UrlMapList', + 'UrlMapReference', + 'UrlMapsAggregatedList', + 'UrlMapsScopedList', + 'UrlMapsValidateRequest', + 
'UrlMapsValidateResponse', + 'UrlMapTest', + 'UrlMapTestHeader', + 'UrlMapValidationResult', + 'UrlRewrite', + 'UsableSubnetwork', + 'UsableSubnetworksAggregatedList', + 'UsableSubnetworkSecondaryRange', + 'UsageExportLocation', + 'ValidateRegionUrlMapRequest', + 'ValidateUrlMapRequest', + 'VmEndpointNatMappings', + 'VmEndpointNatMappingsInterfaceNatMappings', + 'VmEndpointNatMappingsList', + 'VpnGateway', + 'VpnGatewayAggregatedList', + 'VpnGatewayList', + 'VpnGatewaysGetStatusResponse', + 'VpnGatewaysScopedList', + 'VpnGatewayStatus', + 'VpnGatewayStatusHighAvailabilityRequirementState', + 'VpnGatewayStatusTunnel', + 'VpnGatewayStatusVpnConnection', + 'VpnGatewayVpnGatewayInterface', + 'VpnTunnel', + 'VpnTunnelAggregatedList', + 'VpnTunnelList', + 'VpnTunnelsScopedList', + 'WafExpressionSet', + 'WafExpressionSetExpression', + 'WaitGlobalOperationRequest', + 'WaitRegionOperationRequest', + 'WaitZoneOperationRequest', + 'Warning', + 'Warnings', + 'WeightedBackendService', + 'XpnHostList', + 'XpnResourceId', + 'Zone', + 'ZoneList', + 'ZoneSetLabelsRequest', + 'ZoneSetPolicyRequest', +) diff --git a/owl-bot-staging/v1/google/cloud/compute_v1/types/compute.py b/owl-bot-staging/v1/google/cloud/compute_v1/types/compute.py new file mode 100644 index 000000000..0b96f9152 --- /dev/null +++ b/owl-bot-staging/v1/google/cloud/compute_v1/types/compute.py @@ -0,0 +1,73811 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import proto # type: ignore + + +__protobuf__ = proto.module( + package='google.cloud.compute.v1', + manifest={ + 'AbandonInstancesInstanceGroupManagerRequest', + 'AbandonInstancesRegionInstanceGroupManagerRequest', + 'AcceleratorConfig', + 'AcceleratorType', + 'AcceleratorTypeAggregatedList', + 'AcceleratorTypeList', + 'AcceleratorTypesScopedList', + 'Accelerators', + 'AccessConfig', + 'AddAccessConfigInstanceRequest', + 'AddAssociationFirewallPolicyRequest', + 'AddHealthCheckTargetPoolRequest', + 'AddInstanceTargetPoolRequest', + 'AddInstancesInstanceGroupRequest', + 'AddNodesNodeGroupRequest', + 'AddPeeringNetworkRequest', + 'AddResourcePoliciesDiskRequest', + 'AddResourcePoliciesInstanceRequest', + 'AddResourcePoliciesRegionDiskRequest', + 'AddRuleFirewallPolicyRequest', + 'AddRuleSecurityPolicyRequest', + 'AddSignedUrlKeyBackendBucketRequest', + 'AddSignedUrlKeyBackendServiceRequest', + 'Address', + 'AddressAggregatedList', + 'AddressList', + 'AddressesScopedList', + 'AdvancedMachineFeatures', + 'AggregatedListAcceleratorTypesRequest', + 'AggregatedListAddressesRequest', + 'AggregatedListAutoscalersRequest', + 'AggregatedListBackendServicesRequest', + 'AggregatedListDiskTypesRequest', + 'AggregatedListDisksRequest', + 'AggregatedListForwardingRulesRequest', + 'AggregatedListGlobalOperationsRequest', + 'AggregatedListHealthChecksRequest', + 'AggregatedListInstanceGroupManagersRequest', + 'AggregatedListInstanceGroupsRequest', + 'AggregatedListInstancesRequest', + 'AggregatedListInterconnectAttachmentsRequest', + 'AggregatedListMachineTypesRequest', + 'AggregatedListNetworkEndpointGroupsRequest', + 'AggregatedListNodeGroupsRequest', + 'AggregatedListNodeTemplatesRequest', + 'AggregatedListNodeTypesRequest', + 'AggregatedListPacketMirroringsRequest', + 'AggregatedListPublicDelegatedPrefixesRequest', + 'AggregatedListRegionCommitmentsRequest', + 'AggregatedListReservationsRequest', + 'AggregatedListResourcePoliciesRequest', + 'AggregatedListRoutersRequest', + 
'AggregatedListServiceAttachmentsRequest', + 'AggregatedListSslCertificatesRequest', + 'AggregatedListSubnetworksRequest', + 'AggregatedListTargetHttpProxiesRequest', + 'AggregatedListTargetHttpsProxiesRequest', + 'AggregatedListTargetInstancesRequest', + 'AggregatedListTargetPoolsRequest', + 'AggregatedListTargetVpnGatewaysRequest', + 'AggregatedListUrlMapsRequest', + 'AggregatedListVpnGatewaysRequest', + 'AggregatedListVpnTunnelsRequest', + 'AliasIpRange', + 'AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk', + 'AllocationSpecificSKUAllocationReservedInstanceProperties', + 'AllocationSpecificSKUReservation', + 'Allowed', + 'ApplyUpdatesToInstancesInstanceGroupManagerRequest', + 'ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest', + 'AttachDiskInstanceRequest', + 'AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest', + 'AttachNetworkEndpointsNetworkEndpointGroupRequest', + 'AttachedDisk', + 'AttachedDiskInitializeParams', + 'AuditConfig', + 'AuditLogConfig', + 'AuthorizationLoggingOptions', + 'Autoscaler', + 'AutoscalerAggregatedList', + 'AutoscalerList', + 'AutoscalerStatusDetails', + 'AutoscalersScopedList', + 'AutoscalingPolicy', + 'AutoscalingPolicyCpuUtilization', + 'AutoscalingPolicyCustomMetricUtilization', + 'AutoscalingPolicyLoadBalancingUtilization', + 'AutoscalingPolicyScaleInControl', + 'AutoscalingPolicyScalingSchedule', + 'Backend', + 'BackendBucket', + 'BackendBucketCdnPolicy', + 'BackendBucketCdnPolicyBypassCacheOnRequestHeader', + 'BackendBucketCdnPolicyNegativeCachingPolicy', + 'BackendBucketList', + 'BackendService', + 'BackendServiceAggregatedList', + 'BackendServiceCdnPolicy', + 'BackendServiceCdnPolicyBypassCacheOnRequestHeader', + 'BackendServiceCdnPolicyNegativeCachingPolicy', + 'BackendServiceFailoverPolicy', + 'BackendServiceGroupHealth', + 'BackendServiceIAP', + 'BackendServiceList', + 'BackendServiceLogConfig', + 'BackendServiceReference', + 'BackendServicesScopedList', + 'Binding', + 
'BulkInsertInstanceRequest', + 'BulkInsertInstanceResource', + 'BulkInsertInstanceResourcePerInstanceProperties', + 'BulkInsertRegionInstanceRequest', + 'CacheInvalidationRule', + 'CacheKeyPolicy', + 'CircuitBreakers', + 'CloneRulesFirewallPolicyRequest', + 'Commitment', + 'CommitmentAggregatedList', + 'CommitmentList', + 'CommitmentsScopedList', + 'Condition', + 'ConfidentialInstanceConfig', + 'ConnectionDraining', + 'ConsistentHashLoadBalancerSettings', + 'ConsistentHashLoadBalancerSettingsHttpCookie', + 'CorsPolicy', + 'CreateInstancesInstanceGroupManagerRequest', + 'CreateInstancesRegionInstanceGroupManagerRequest', + 'CreateSnapshotDiskRequest', + 'CreateSnapshotRegionDiskRequest', + 'CustomerEncryptionKey', + 'CustomerEncryptionKeyProtectedDisk', + 'Data', + 'DeleteAccessConfigInstanceRequest', + 'DeleteAddressRequest', + 'DeleteAutoscalerRequest', + 'DeleteBackendBucketRequest', + 'DeleteBackendServiceRequest', + 'DeleteDiskRequest', + 'DeleteExternalVpnGatewayRequest', + 'DeleteFirewallPolicyRequest', + 'DeleteFirewallRequest', + 'DeleteForwardingRuleRequest', + 'DeleteGlobalAddressRequest', + 'DeleteGlobalForwardingRuleRequest', + 'DeleteGlobalNetworkEndpointGroupRequest', + 'DeleteGlobalOperationRequest', + 'DeleteGlobalOperationResponse', + 'DeleteGlobalOrganizationOperationRequest', + 'DeleteGlobalOrganizationOperationResponse', + 'DeleteGlobalPublicDelegatedPrefixeRequest', + 'DeleteHealthCheckRequest', + 'DeleteImageRequest', + 'DeleteInstanceGroupManagerRequest', + 'DeleteInstanceGroupRequest', + 'DeleteInstanceRequest', + 'DeleteInstanceTemplateRequest', + 'DeleteInstancesInstanceGroupManagerRequest', + 'DeleteInstancesRegionInstanceGroupManagerRequest', + 'DeleteInterconnectAttachmentRequest', + 'DeleteInterconnectRequest', + 'DeleteLicenseRequest', + 'DeleteNetworkEndpointGroupRequest', + 'DeleteNetworkRequest', + 'DeleteNodeGroupRequest', + 'DeleteNodeTemplateRequest', + 'DeleteNodesNodeGroupRequest', + 'DeletePacketMirroringRequest', + 
'DeletePerInstanceConfigsInstanceGroupManagerRequest', + 'DeletePerInstanceConfigsRegionInstanceGroupManagerRequest', + 'DeletePublicAdvertisedPrefixeRequest', + 'DeletePublicDelegatedPrefixeRequest', + 'DeleteRegionAutoscalerRequest', + 'DeleteRegionBackendServiceRequest', + 'DeleteRegionDiskRequest', + 'DeleteRegionHealthCheckRequest', + 'DeleteRegionHealthCheckServiceRequest', + 'DeleteRegionInstanceGroupManagerRequest', + 'DeleteRegionNetworkEndpointGroupRequest', + 'DeleteRegionNotificationEndpointRequest', + 'DeleteRegionOperationRequest', + 'DeleteRegionOperationResponse', + 'DeleteRegionSslCertificateRequest', + 'DeleteRegionTargetHttpProxyRequest', + 'DeleteRegionTargetHttpsProxyRequest', + 'DeleteRegionUrlMapRequest', + 'DeleteReservationRequest', + 'DeleteResourcePolicyRequest', + 'DeleteRouteRequest', + 'DeleteRouterRequest', + 'DeleteSecurityPolicyRequest', + 'DeleteServiceAttachmentRequest', + 'DeleteSignedUrlKeyBackendBucketRequest', + 'DeleteSignedUrlKeyBackendServiceRequest', + 'DeleteSnapshotRequest', + 'DeleteSslCertificateRequest', + 'DeleteSslPolicyRequest', + 'DeleteSubnetworkRequest', + 'DeleteTargetGrpcProxyRequest', + 'DeleteTargetHttpProxyRequest', + 'DeleteTargetHttpsProxyRequest', + 'DeleteTargetInstanceRequest', + 'DeleteTargetPoolRequest', + 'DeleteTargetSslProxyRequest', + 'DeleteTargetTcpProxyRequest', + 'DeleteTargetVpnGatewayRequest', + 'DeleteUrlMapRequest', + 'DeleteVpnGatewayRequest', + 'DeleteVpnTunnelRequest', + 'DeleteZoneOperationRequest', + 'DeleteZoneOperationResponse', + 'Denied', + 'DeprecateImageRequest', + 'DeprecationStatus', + 'DetachDiskInstanceRequest', + 'DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest', + 'DetachNetworkEndpointsNetworkEndpointGroupRequest', + 'DisableXpnHostProjectRequest', + 'DisableXpnResourceProjectRequest', + 'Disk', + 'DiskAggregatedList', + 'DiskInstantiationConfig', + 'DiskList', + 'DiskMoveRequest', + 'DiskType', + 'DiskTypeAggregatedList', + 'DiskTypeList', + 
'DiskTypesScopedList', + 'DisksAddResourcePoliciesRequest', + 'DisksRemoveResourcePoliciesRequest', + 'DisksResizeRequest', + 'DisksScopedList', + 'DisplayDevice', + 'DistributionPolicy', + 'DistributionPolicyZoneConfiguration', + 'Duration', + 'EnableXpnHostProjectRequest', + 'EnableXpnResourceProjectRequest', + 'Error', + 'Errors', + 'ExchangedPeeringRoute', + 'ExchangedPeeringRoutesList', + 'ExpandIpCidrRangeSubnetworkRequest', + 'Expr', + 'ExternalVpnGateway', + 'ExternalVpnGatewayInterface', + 'ExternalVpnGatewayList', + 'FileContentBuffer', + 'Firewall', + 'FirewallList', + 'FirewallLogConfig', + 'FirewallPoliciesListAssociationsResponse', + 'FirewallPolicy', + 'FirewallPolicyAssociation', + 'FirewallPolicyList', + 'FirewallPolicyRule', + 'FirewallPolicyRuleMatcher', + 'FirewallPolicyRuleMatcherLayer4Config', + 'FixedOrPercent', + 'ForwardingRule', + 'ForwardingRuleAggregatedList', + 'ForwardingRuleList', + 'ForwardingRuleReference', + 'ForwardingRuleServiceDirectoryRegistration', + 'ForwardingRulesScopedList', + 'GRPCHealthCheck', + 'GetAcceleratorTypeRequest', + 'GetAddressRequest', + 'GetAssociationFirewallPolicyRequest', + 'GetAutoscalerRequest', + 'GetBackendBucketRequest', + 'GetBackendServiceRequest', + 'GetDiagnosticsInterconnectRequest', + 'GetDiskRequest', + 'GetDiskTypeRequest', + 'GetEffectiveFirewallsInstanceRequest', + 'GetEffectiveFirewallsNetworkRequest', + 'GetExternalVpnGatewayRequest', + 'GetFirewallPolicyRequest', + 'GetFirewallRequest', + 'GetForwardingRuleRequest', + 'GetFromFamilyImageRequest', + 'GetGlobalAddressRequest', + 'GetGlobalForwardingRuleRequest', + 'GetGlobalNetworkEndpointGroupRequest', + 'GetGlobalOperationRequest', + 'GetGlobalOrganizationOperationRequest', + 'GetGlobalPublicDelegatedPrefixeRequest', + 'GetGuestAttributesInstanceRequest', + 'GetHealthBackendServiceRequest', + 'GetHealthCheckRequest', + 'GetHealthRegionBackendServiceRequest', + 'GetHealthTargetPoolRequest', + 'GetIamPolicyDiskRequest', + 
'GetIamPolicyFirewallPolicyRequest', + 'GetIamPolicyImageRequest', + 'GetIamPolicyInstanceRequest', + 'GetIamPolicyInstanceTemplateRequest', + 'GetIamPolicyLicenseRequest', + 'GetIamPolicyNodeGroupRequest', + 'GetIamPolicyNodeTemplateRequest', + 'GetIamPolicyRegionDiskRequest', + 'GetIamPolicyReservationRequest', + 'GetIamPolicyResourcePolicyRequest', + 'GetIamPolicyServiceAttachmentRequest', + 'GetIamPolicySnapshotRequest', + 'GetIamPolicySubnetworkRequest', + 'GetImageFamilyViewRequest', + 'GetImageRequest', + 'GetInstanceGroupManagerRequest', + 'GetInstanceGroupRequest', + 'GetInstanceRequest', + 'GetInstanceTemplateRequest', + 'GetInterconnectAttachmentRequest', + 'GetInterconnectLocationRequest', + 'GetInterconnectRequest', + 'GetLicenseCodeRequest', + 'GetLicenseRequest', + 'GetMachineTypeRequest', + 'GetNatMappingInfoRoutersRequest', + 'GetNetworkEndpointGroupRequest', + 'GetNetworkRequest', + 'GetNodeGroupRequest', + 'GetNodeTemplateRequest', + 'GetNodeTypeRequest', + 'GetPacketMirroringRequest', + 'GetProjectRequest', + 'GetPublicAdvertisedPrefixeRequest', + 'GetPublicDelegatedPrefixeRequest', + 'GetRegionAutoscalerRequest', + 'GetRegionBackendServiceRequest', + 'GetRegionCommitmentRequest', + 'GetRegionDiskRequest', + 'GetRegionDiskTypeRequest', + 'GetRegionHealthCheckRequest', + 'GetRegionHealthCheckServiceRequest', + 'GetRegionInstanceGroupManagerRequest', + 'GetRegionInstanceGroupRequest', + 'GetRegionNetworkEndpointGroupRequest', + 'GetRegionNotificationEndpointRequest', + 'GetRegionOperationRequest', + 'GetRegionRequest', + 'GetRegionSslCertificateRequest', + 'GetRegionTargetHttpProxyRequest', + 'GetRegionTargetHttpsProxyRequest', + 'GetRegionUrlMapRequest', + 'GetReservationRequest', + 'GetResourcePolicyRequest', + 'GetRouteRequest', + 'GetRouterRequest', + 'GetRouterStatusRouterRequest', + 'GetRuleFirewallPolicyRequest', + 'GetRuleSecurityPolicyRequest', + 'GetScreenshotInstanceRequest', + 'GetSecurityPolicyRequest', + 
'GetSerialPortOutputInstanceRequest', + 'GetServiceAttachmentRequest', + 'GetShieldedInstanceIdentityInstanceRequest', + 'GetSnapshotRequest', + 'GetSslCertificateRequest', + 'GetSslPolicyRequest', + 'GetStatusVpnGatewayRequest', + 'GetSubnetworkRequest', + 'GetTargetGrpcProxyRequest', + 'GetTargetHttpProxyRequest', + 'GetTargetHttpsProxyRequest', + 'GetTargetInstanceRequest', + 'GetTargetPoolRequest', + 'GetTargetSslProxyRequest', + 'GetTargetTcpProxyRequest', + 'GetTargetVpnGatewayRequest', + 'GetUrlMapRequest', + 'GetVpnGatewayRequest', + 'GetVpnTunnelRequest', + 'GetXpnHostProjectRequest', + 'GetXpnResourcesProjectsRequest', + 'GetZoneOperationRequest', + 'GetZoneRequest', + 'GlobalNetworkEndpointGroupsAttachEndpointsRequest', + 'GlobalNetworkEndpointGroupsDetachEndpointsRequest', + 'GlobalOrganizationSetPolicyRequest', + 'GlobalSetLabelsRequest', + 'GlobalSetPolicyRequest', + 'GuestAttributes', + 'GuestAttributesEntry', + 'GuestAttributesValue', + 'GuestOsFeature', + 'HTTP2HealthCheck', + 'HTTPHealthCheck', + 'HTTPSHealthCheck', + 'HealthCheck', + 'HealthCheckList', + 'HealthCheckLogConfig', + 'HealthCheckReference', + 'HealthCheckService', + 'HealthCheckServiceReference', + 'HealthCheckServicesList', + 'HealthChecksAggregatedList', + 'HealthChecksScopedList', + 'HealthStatus', + 'HealthStatusForNetworkEndpoint', + 'HostRule', + 'HttpFaultAbort', + 'HttpFaultDelay', + 'HttpFaultInjection', + 'HttpHeaderAction', + 'HttpHeaderMatch', + 'HttpHeaderOption', + 'HttpQueryParameterMatch', + 'HttpRedirectAction', + 'HttpRetryPolicy', + 'HttpRouteAction', + 'HttpRouteRule', + 'HttpRouteRuleMatch', + 'Image', + 'ImageFamilyView', + 'ImageList', + 'InitialStateConfig', + 'InsertAddressRequest', + 'InsertAutoscalerRequest', + 'InsertBackendBucketRequest', + 'InsertBackendServiceRequest', + 'InsertDiskRequest', + 'InsertExternalVpnGatewayRequest', + 'InsertFirewallPolicyRequest', + 'InsertFirewallRequest', + 'InsertForwardingRuleRequest', + 'InsertGlobalAddressRequest', + 
'InsertGlobalForwardingRuleRequest', + 'InsertGlobalNetworkEndpointGroupRequest', + 'InsertGlobalPublicDelegatedPrefixeRequest', + 'InsertHealthCheckRequest', + 'InsertImageRequest', + 'InsertInstanceGroupManagerRequest', + 'InsertInstanceGroupRequest', + 'InsertInstanceRequest', + 'InsertInstanceTemplateRequest', + 'InsertInterconnectAttachmentRequest', + 'InsertInterconnectRequest', + 'InsertLicenseRequest', + 'InsertNetworkEndpointGroupRequest', + 'InsertNetworkRequest', + 'InsertNodeGroupRequest', + 'InsertNodeTemplateRequest', + 'InsertPacketMirroringRequest', + 'InsertPublicAdvertisedPrefixeRequest', + 'InsertPublicDelegatedPrefixeRequest', + 'InsertRegionAutoscalerRequest', + 'InsertRegionBackendServiceRequest', + 'InsertRegionCommitmentRequest', + 'InsertRegionDiskRequest', + 'InsertRegionHealthCheckRequest', + 'InsertRegionHealthCheckServiceRequest', + 'InsertRegionInstanceGroupManagerRequest', + 'InsertRegionNetworkEndpointGroupRequest', + 'InsertRegionNotificationEndpointRequest', + 'InsertRegionSslCertificateRequest', + 'InsertRegionTargetHttpProxyRequest', + 'InsertRegionTargetHttpsProxyRequest', + 'InsertRegionUrlMapRequest', + 'InsertReservationRequest', + 'InsertResourcePolicyRequest', + 'InsertRouteRequest', + 'InsertRouterRequest', + 'InsertSecurityPolicyRequest', + 'InsertServiceAttachmentRequest', + 'InsertSslCertificateRequest', + 'InsertSslPolicyRequest', + 'InsertSubnetworkRequest', + 'InsertTargetGrpcProxyRequest', + 'InsertTargetHttpProxyRequest', + 'InsertTargetHttpsProxyRequest', + 'InsertTargetInstanceRequest', + 'InsertTargetPoolRequest', + 'InsertTargetSslProxyRequest', + 'InsertTargetTcpProxyRequest', + 'InsertTargetVpnGatewayRequest', + 'InsertUrlMapRequest', + 'InsertVpnGatewayRequest', + 'InsertVpnTunnelRequest', + 'Instance', + 'InstanceAggregatedList', + 'InstanceGroup', + 'InstanceGroupAggregatedList', + 'InstanceGroupList', + 'InstanceGroupManager', + 'InstanceGroupManagerActionsSummary', + 'InstanceGroupManagerAggregatedList', 
+ 'InstanceGroupManagerAutoHealingPolicy', + 'InstanceGroupManagerList', + 'InstanceGroupManagerStatus', + 'InstanceGroupManagerStatusStateful', + 'InstanceGroupManagerStatusStatefulPerInstanceConfigs', + 'InstanceGroupManagerStatusVersionTarget', + 'InstanceGroupManagerUpdatePolicy', + 'InstanceGroupManagerVersion', + 'InstanceGroupManagersAbandonInstancesRequest', + 'InstanceGroupManagersApplyUpdatesRequest', + 'InstanceGroupManagersCreateInstancesRequest', + 'InstanceGroupManagersDeleteInstancesRequest', + 'InstanceGroupManagersDeletePerInstanceConfigsReq', + 'InstanceGroupManagersListErrorsResponse', + 'InstanceGroupManagersListManagedInstancesResponse', + 'InstanceGroupManagersListPerInstanceConfigsResp', + 'InstanceGroupManagersPatchPerInstanceConfigsReq', + 'InstanceGroupManagersRecreateInstancesRequest', + 'InstanceGroupManagersScopedList', + 'InstanceGroupManagersSetInstanceTemplateRequest', + 'InstanceGroupManagersSetTargetPoolsRequest', + 'InstanceGroupManagersUpdatePerInstanceConfigsReq', + 'InstanceGroupsAddInstancesRequest', + 'InstanceGroupsListInstances', + 'InstanceGroupsListInstancesRequest', + 'InstanceGroupsRemoveInstancesRequest', + 'InstanceGroupsScopedList', + 'InstanceGroupsSetNamedPortsRequest', + 'InstanceList', + 'InstanceListReferrers', + 'InstanceManagedByIgmError', + 'InstanceManagedByIgmErrorInstanceActionDetails', + 'InstanceManagedByIgmErrorManagedInstanceError', + 'InstanceMoveRequest', + 'InstanceProperties', + 'InstanceReference', + 'InstanceTemplate', + 'InstanceTemplateList', + 'InstanceWithNamedPorts', + 'InstancesAddResourcePoliciesRequest', + 'InstancesGetEffectiveFirewallsResponse', + 'InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + 'InstancesRemoveResourcePoliciesRequest', + 'InstancesScopedList', + 'InstancesSetLabelsRequest', + 'InstancesSetMachineResourcesRequest', + 'InstancesSetMachineTypeRequest', + 'InstancesSetMinCpuPlatformRequest', + 'InstancesSetServiceAccountRequest', + 
'InstancesStartWithEncryptionKeyRequest', + 'Int64RangeMatch', + 'Interconnect', + 'InterconnectAttachment', + 'InterconnectAttachmentAggregatedList', + 'InterconnectAttachmentList', + 'InterconnectAttachmentPartnerMetadata', + 'InterconnectAttachmentPrivateInfo', + 'InterconnectAttachmentsScopedList', + 'InterconnectCircuitInfo', + 'InterconnectDiagnostics', + 'InterconnectDiagnosticsARPEntry', + 'InterconnectDiagnosticsLinkLACPStatus', + 'InterconnectDiagnosticsLinkOpticalPower', + 'InterconnectDiagnosticsLinkStatus', + 'InterconnectList', + 'InterconnectLocation', + 'InterconnectLocationList', + 'InterconnectLocationRegionInfo', + 'InterconnectOutageNotification', + 'InterconnectsGetDiagnosticsResponse', + 'InvalidateCacheUrlMapRequest', + 'Items', + 'License', + 'LicenseCode', + 'LicenseCodeLicenseAlias', + 'LicenseResourceCommitment', + 'LicenseResourceRequirements', + 'LicensesListResponse', + 'ListAcceleratorTypesRequest', + 'ListAddressesRequest', + 'ListAssociationsFirewallPolicyRequest', + 'ListAutoscalersRequest', + 'ListAvailableFeaturesSslPoliciesRequest', + 'ListBackendBucketsRequest', + 'ListBackendServicesRequest', + 'ListDiskTypesRequest', + 'ListDisksRequest', + 'ListErrorsInstanceGroupManagersRequest', + 'ListErrorsRegionInstanceGroupManagersRequest', + 'ListExternalVpnGatewaysRequest', + 'ListFirewallPoliciesRequest', + 'ListFirewallsRequest', + 'ListForwardingRulesRequest', + 'ListGlobalAddressesRequest', + 'ListGlobalForwardingRulesRequest', + 'ListGlobalNetworkEndpointGroupsRequest', + 'ListGlobalOperationsRequest', + 'ListGlobalOrganizationOperationsRequest', + 'ListGlobalPublicDelegatedPrefixesRequest', + 'ListHealthChecksRequest', + 'ListImagesRequest', + 'ListInstanceGroupManagersRequest', + 'ListInstanceGroupsRequest', + 'ListInstanceTemplatesRequest', + 'ListInstancesInstanceGroupsRequest', + 'ListInstancesRegionInstanceGroupsRequest', + 'ListInstancesRequest', + 'ListInterconnectAttachmentsRequest', + 
'ListInterconnectLocationsRequest', + 'ListInterconnectsRequest', + 'ListLicensesRequest', + 'ListMachineTypesRequest', + 'ListManagedInstancesInstanceGroupManagersRequest', + 'ListManagedInstancesRegionInstanceGroupManagersRequest', + 'ListNetworkEndpointGroupsRequest', + 'ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest', + 'ListNetworkEndpointsNetworkEndpointGroupsRequest', + 'ListNetworksRequest', + 'ListNodeGroupsRequest', + 'ListNodeTemplatesRequest', + 'ListNodeTypesRequest', + 'ListNodesNodeGroupsRequest', + 'ListPacketMirroringsRequest', + 'ListPeeringRoutesNetworksRequest', + 'ListPerInstanceConfigsInstanceGroupManagersRequest', + 'ListPerInstanceConfigsRegionInstanceGroupManagersRequest', + 'ListPreconfiguredExpressionSetsSecurityPoliciesRequest', + 'ListPublicAdvertisedPrefixesRequest', + 'ListPublicDelegatedPrefixesRequest', + 'ListReferrersInstancesRequest', + 'ListRegionAutoscalersRequest', + 'ListRegionBackendServicesRequest', + 'ListRegionCommitmentsRequest', + 'ListRegionDiskTypesRequest', + 'ListRegionDisksRequest', + 'ListRegionHealthCheckServicesRequest', + 'ListRegionHealthChecksRequest', + 'ListRegionInstanceGroupManagersRequest', + 'ListRegionInstanceGroupsRequest', + 'ListRegionNetworkEndpointGroupsRequest', + 'ListRegionNotificationEndpointsRequest', + 'ListRegionOperationsRequest', + 'ListRegionSslCertificatesRequest', + 'ListRegionTargetHttpProxiesRequest', + 'ListRegionTargetHttpsProxiesRequest', + 'ListRegionUrlMapsRequest', + 'ListRegionsRequest', + 'ListReservationsRequest', + 'ListResourcePoliciesRequest', + 'ListRoutersRequest', + 'ListRoutesRequest', + 'ListSecurityPoliciesRequest', + 'ListServiceAttachmentsRequest', + 'ListSnapshotsRequest', + 'ListSslCertificatesRequest', + 'ListSslPoliciesRequest', + 'ListSubnetworksRequest', + 'ListTargetGrpcProxiesRequest', + 'ListTargetHttpProxiesRequest', + 'ListTargetHttpsProxiesRequest', + 'ListTargetInstancesRequest', + 'ListTargetPoolsRequest', + 'ListTargetSslProxiesRequest', + 
'ListTargetTcpProxiesRequest', + 'ListTargetVpnGatewaysRequest', + 'ListUrlMapsRequest', + 'ListUsableSubnetworksRequest', + 'ListVpnGatewaysRequest', + 'ListVpnTunnelsRequest', + 'ListXpnHostsProjectsRequest', + 'ListZoneOperationsRequest', + 'ListZonesRequest', + 'LocalDisk', + 'LocationPolicy', + 'LocationPolicyLocation', + 'LogConfig', + 'LogConfigCloudAuditOptions', + 'LogConfigCounterOptions', + 'LogConfigCounterOptionsCustomField', + 'LogConfigDataAccessOptions', + 'MachineType', + 'MachineTypeAggregatedList', + 'MachineTypeList', + 'MachineTypesScopedList', + 'ManagedInstance', + 'ManagedInstanceInstanceHealth', + 'ManagedInstanceLastAttempt', + 'ManagedInstanceVersion', + 'Metadata', + 'MetadataFilter', + 'MetadataFilterLabelMatch', + 'MoveDiskProjectRequest', + 'MoveFirewallPolicyRequest', + 'MoveInstanceProjectRequest', + 'NamedPort', + 'Network', + 'NetworkEndpoint', + 'NetworkEndpointGroup', + 'NetworkEndpointGroupAggregatedList', + 'NetworkEndpointGroupAppEngine', + 'NetworkEndpointGroupCloudFunction', + 'NetworkEndpointGroupCloudRun', + 'NetworkEndpointGroupList', + 'NetworkEndpointGroupsAttachEndpointsRequest', + 'NetworkEndpointGroupsDetachEndpointsRequest', + 'NetworkEndpointGroupsListEndpointsRequest', + 'NetworkEndpointGroupsListNetworkEndpoints', + 'NetworkEndpointGroupsScopedList', + 'NetworkEndpointWithHealthStatus', + 'NetworkInterface', + 'NetworkList', + 'NetworkPeering', + 'NetworkRoutingConfig', + 'NetworksAddPeeringRequest', + 'NetworksGetEffectiveFirewallsResponse', + 'NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + 'NetworksRemovePeeringRequest', + 'NetworksUpdatePeeringRequest', + 'NodeGroup', + 'NodeGroupAggregatedList', + 'NodeGroupAutoscalingPolicy', + 'NodeGroupList', + 'NodeGroupMaintenanceWindow', + 'NodeGroupNode', + 'NodeGroupsAddNodesRequest', + 'NodeGroupsDeleteNodesRequest', + 'NodeGroupsListNodes', + 'NodeGroupsScopedList', + 'NodeGroupsSetNodeTemplateRequest', + 'NodeTemplate', + 
'NodeTemplateAggregatedList', + 'NodeTemplateList', + 'NodeTemplateNodeTypeFlexibility', + 'NodeTemplatesScopedList', + 'NodeType', + 'NodeTypeAggregatedList', + 'NodeTypeList', + 'NodeTypesScopedList', + 'NotificationEndpoint', + 'NotificationEndpointGrpcSettings', + 'NotificationEndpointList', + 'Operation', + 'OperationAggregatedList', + 'OperationList', + 'OperationsScopedList', + 'OutlierDetection', + 'PacketMirroring', + 'PacketMirroringAggregatedList', + 'PacketMirroringFilter', + 'PacketMirroringForwardingRuleInfo', + 'PacketMirroringList', + 'PacketMirroringMirroredResourceInfo', + 'PacketMirroringMirroredResourceInfoInstanceInfo', + 'PacketMirroringMirroredResourceInfoSubnetInfo', + 'PacketMirroringNetworkInfo', + 'PacketMirroringsScopedList', + 'PatchAutoscalerRequest', + 'PatchBackendBucketRequest', + 'PatchBackendServiceRequest', + 'PatchFirewallPolicyRequest', + 'PatchFirewallRequest', + 'PatchForwardingRuleRequest', + 'PatchGlobalForwardingRuleRequest', + 'PatchGlobalPublicDelegatedPrefixeRequest', + 'PatchHealthCheckRequest', + 'PatchImageRequest', + 'PatchInstanceGroupManagerRequest', + 'PatchInterconnectAttachmentRequest', + 'PatchInterconnectRequest', + 'PatchNetworkRequest', + 'PatchNodeGroupRequest', + 'PatchPacketMirroringRequest', + 'PatchPerInstanceConfigsInstanceGroupManagerRequest', + 'PatchPerInstanceConfigsRegionInstanceGroupManagerRequest', + 'PatchPublicAdvertisedPrefixeRequest', + 'PatchPublicDelegatedPrefixeRequest', + 'PatchRegionAutoscalerRequest', + 'PatchRegionBackendServiceRequest', + 'PatchRegionHealthCheckRequest', + 'PatchRegionHealthCheckServiceRequest', + 'PatchRegionInstanceGroupManagerRequest', + 'PatchRegionUrlMapRequest', + 'PatchRouterRequest', + 'PatchRuleFirewallPolicyRequest', + 'PatchRuleSecurityPolicyRequest', + 'PatchSecurityPolicyRequest', + 'PatchServiceAttachmentRequest', + 'PatchSslPolicyRequest', + 'PatchSubnetworkRequest', + 'PatchTargetGrpcProxyRequest', + 'PatchTargetHttpProxyRequest', + 
'PatchTargetHttpsProxyRequest', + 'PatchUrlMapRequest', + 'PathMatcher', + 'PathRule', + 'PerInstanceConfig', + 'Policy', + 'PreconfiguredWafSet', + 'PreservedState', + 'PreservedStatePreservedDisk', + 'PreviewRouterRequest', + 'Project', + 'ProjectsDisableXpnResourceRequest', + 'ProjectsEnableXpnResourceRequest', + 'ProjectsGetXpnResources', + 'ProjectsListXpnHostsRequest', + 'ProjectsSetDefaultNetworkTierRequest', + 'PublicAdvertisedPrefix', + 'PublicAdvertisedPrefixList', + 'PublicAdvertisedPrefixPublicDelegatedPrefix', + 'PublicDelegatedPrefix', + 'PublicDelegatedPrefixAggregatedList', + 'PublicDelegatedPrefixList', + 'PublicDelegatedPrefixPublicDelegatedSubPrefix', + 'PublicDelegatedPrefixesScopedList', + 'Quota', + 'RawDisk', + 'RecreateInstancesInstanceGroupManagerRequest', + 'RecreateInstancesRegionInstanceGroupManagerRequest', + 'Reference', + 'Region', + 'RegionAutoscalerList', + 'RegionDiskTypeList', + 'RegionDisksAddResourcePoliciesRequest', + 'RegionDisksRemoveResourcePoliciesRequest', + 'RegionDisksResizeRequest', + 'RegionInstanceGroupList', + 'RegionInstanceGroupManagerDeleteInstanceConfigReq', + 'RegionInstanceGroupManagerList', + 'RegionInstanceGroupManagerPatchInstanceConfigReq', + 'RegionInstanceGroupManagerUpdateInstanceConfigReq', + 'RegionInstanceGroupManagersAbandonInstancesRequest', + 'RegionInstanceGroupManagersApplyUpdatesRequest', + 'RegionInstanceGroupManagersCreateInstancesRequest', + 'RegionInstanceGroupManagersDeleteInstancesRequest', + 'RegionInstanceGroupManagersListErrorsResponse', + 'RegionInstanceGroupManagersListInstanceConfigsResp', + 'RegionInstanceGroupManagersListInstancesResponse', + 'RegionInstanceGroupManagersRecreateRequest', + 'RegionInstanceGroupManagersSetTargetPoolsRequest', + 'RegionInstanceGroupManagersSetTemplateRequest', + 'RegionInstanceGroupsListInstances', + 'RegionInstanceGroupsListInstancesRequest', + 'RegionInstanceGroupsSetNamedPortsRequest', + 'RegionList', + 'RegionSetLabelsRequest', + 
'RegionSetPolicyRequest', + 'RegionTargetHttpsProxiesSetSslCertificatesRequest', + 'RegionUrlMapsValidateRequest', + 'RemoveAssociationFirewallPolicyRequest', + 'RemoveHealthCheckTargetPoolRequest', + 'RemoveInstanceTargetPoolRequest', + 'RemoveInstancesInstanceGroupRequest', + 'RemovePeeringNetworkRequest', + 'RemoveResourcePoliciesDiskRequest', + 'RemoveResourcePoliciesInstanceRequest', + 'RemoveResourcePoliciesRegionDiskRequest', + 'RemoveRuleFirewallPolicyRequest', + 'RemoveRuleSecurityPolicyRequest', + 'RequestMirrorPolicy', + 'Reservation', + 'ReservationAffinity', + 'ReservationAggregatedList', + 'ReservationList', + 'ReservationsResizeRequest', + 'ReservationsScopedList', + 'ResetInstanceRequest', + 'ResizeDiskRequest', + 'ResizeInstanceGroupManagerRequest', + 'ResizeRegionDiskRequest', + 'ResizeRegionInstanceGroupManagerRequest', + 'ResizeReservationRequest', + 'ResourceCommitment', + 'ResourceGroupReference', + 'ResourcePoliciesScopedList', + 'ResourcePolicy', + 'ResourcePolicyAggregatedList', + 'ResourcePolicyDailyCycle', + 'ResourcePolicyGroupPlacementPolicy', + 'ResourcePolicyHourlyCycle', + 'ResourcePolicyInstanceSchedulePolicy', + 'ResourcePolicyInstanceSchedulePolicySchedule', + 'ResourcePolicyList', + 'ResourcePolicyResourceStatus', + 'ResourcePolicyResourceStatusInstanceSchedulePolicyStatus', + 'ResourcePolicySnapshotSchedulePolicy', + 'ResourcePolicySnapshotSchedulePolicyRetentionPolicy', + 'ResourcePolicySnapshotSchedulePolicySchedule', + 'ResourcePolicySnapshotSchedulePolicySnapshotProperties', + 'ResourcePolicyWeeklyCycle', + 'ResourcePolicyWeeklyCycleDayOfWeek', + 'Route', + 'RouteAsPath', + 'RouteList', + 'Router', + 'RouterAdvertisedIpRange', + 'RouterAggregatedList', + 'RouterBgp', + 'RouterBgpPeer', + 'RouterBgpPeerBfd', + 'RouterInterface', + 'RouterList', + 'RouterNat', + 'RouterNatLogConfig', + 'RouterNatRule', + 'RouterNatRuleAction', + 'RouterNatSubnetworkToNat', + 'RouterStatus', + 'RouterStatusBgpPeerStatus', + 
'RouterStatusNatStatus', + 'RouterStatusNatStatusNatRuleStatus', + 'RouterStatusResponse', + 'RoutersPreviewResponse', + 'RoutersScopedList', + 'Rule', + 'SSLHealthCheck', + 'ScalingScheduleStatus', + 'Scheduling', + 'SchedulingNodeAffinity', + 'ScratchDisks', + 'Screenshot', + 'SecurityPoliciesListPreconfiguredExpressionSetsResponse', + 'SecurityPoliciesWafConfig', + 'SecurityPolicy', + 'SecurityPolicyAdaptiveProtectionConfig', + 'SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig', + 'SecurityPolicyAdvancedOptionsConfig', + 'SecurityPolicyList', + 'SecurityPolicyReference', + 'SecurityPolicyRule', + 'SecurityPolicyRuleMatcher', + 'SecurityPolicyRuleMatcherConfig', + 'SecuritySettings', + 'SendDiagnosticInterruptInstanceRequest', + 'SendDiagnosticInterruptInstanceResponse', + 'SerialPortOutput', + 'ServerBinding', + 'ServiceAccount', + 'ServiceAttachment', + 'ServiceAttachmentAggregatedList', + 'ServiceAttachmentConnectedEndpoint', + 'ServiceAttachmentConsumerProjectLimit', + 'ServiceAttachmentList', + 'ServiceAttachmentsScopedList', + 'SetBackendServiceTargetSslProxyRequest', + 'SetBackendServiceTargetTcpProxyRequest', + 'SetBackupTargetPoolRequest', + 'SetCommonInstanceMetadataProjectRequest', + 'SetDefaultNetworkTierProjectRequest', + 'SetDeletionProtectionInstanceRequest', + 'SetDiskAutoDeleteInstanceRequest', + 'SetIamPolicyDiskRequest', + 'SetIamPolicyFirewallPolicyRequest', + 'SetIamPolicyImageRequest', + 'SetIamPolicyInstanceRequest', + 'SetIamPolicyInstanceTemplateRequest', + 'SetIamPolicyLicenseRequest', + 'SetIamPolicyNodeGroupRequest', + 'SetIamPolicyNodeTemplateRequest', + 'SetIamPolicyRegionDiskRequest', + 'SetIamPolicyReservationRequest', + 'SetIamPolicyResourcePolicyRequest', + 'SetIamPolicyServiceAttachmentRequest', + 'SetIamPolicySnapshotRequest', + 'SetIamPolicySubnetworkRequest', + 'SetInstanceTemplateInstanceGroupManagerRequest', + 'SetInstanceTemplateRegionInstanceGroupManagerRequest', + 'SetLabelsDiskRequest', + 
'SetLabelsExternalVpnGatewayRequest', + 'SetLabelsForwardingRuleRequest', + 'SetLabelsGlobalForwardingRuleRequest', + 'SetLabelsImageRequest', + 'SetLabelsInstanceRequest', + 'SetLabelsRegionDiskRequest', + 'SetLabelsSnapshotRequest', + 'SetLabelsVpnGatewayRequest', + 'SetMachineResourcesInstanceRequest', + 'SetMachineTypeInstanceRequest', + 'SetMetadataInstanceRequest', + 'SetMinCpuPlatformInstanceRequest', + 'SetNamedPortsInstanceGroupRequest', + 'SetNamedPortsRegionInstanceGroupRequest', + 'SetNodeTemplateNodeGroupRequest', + 'SetPrivateIpGoogleAccessSubnetworkRequest', + 'SetProxyHeaderTargetSslProxyRequest', + 'SetProxyHeaderTargetTcpProxyRequest', + 'SetQuicOverrideTargetHttpsProxyRequest', + 'SetSchedulingInstanceRequest', + 'SetSecurityPolicyBackendServiceRequest', + 'SetServiceAccountInstanceRequest', + 'SetShieldedInstanceIntegrityPolicyInstanceRequest', + 'SetSslCertificatesRegionTargetHttpsProxyRequest', + 'SetSslCertificatesTargetHttpsProxyRequest', + 'SetSslCertificatesTargetSslProxyRequest', + 'SetSslPolicyTargetHttpsProxyRequest', + 'SetSslPolicyTargetSslProxyRequest', + 'SetTagsInstanceRequest', + 'SetTargetForwardingRuleRequest', + 'SetTargetGlobalForwardingRuleRequest', + 'SetTargetPoolsInstanceGroupManagerRequest', + 'SetTargetPoolsRegionInstanceGroupManagerRequest', + 'SetUrlMapRegionTargetHttpProxyRequest', + 'SetUrlMapRegionTargetHttpsProxyRequest', + 'SetUrlMapTargetHttpProxyRequest', + 'SetUrlMapTargetHttpsProxyRequest', + 'SetUsageExportBucketProjectRequest', + 'ShieldedInstanceConfig', + 'ShieldedInstanceIdentity', + 'ShieldedInstanceIdentityEntry', + 'ShieldedInstanceIntegrityPolicy', + 'SignedUrlKey', + 'SimulateMaintenanceEventInstanceRequest', + 'Snapshot', + 'SnapshotList', + 'SourceInstanceParams', + 'SslCertificate', + 'SslCertificateAggregatedList', + 'SslCertificateList', + 'SslCertificateManagedSslCertificate', + 'SslCertificateSelfManagedSslCertificate', + 'SslCertificatesScopedList', + 'SslPoliciesList', + 
'SslPoliciesListAvailableFeaturesResponse', + 'SslPolicy', + 'SslPolicyReference', + 'StartInstanceRequest', + 'StartWithEncryptionKeyInstanceRequest', + 'StatefulPolicy', + 'StatefulPolicyPreservedState', + 'StatefulPolicyPreservedStateDiskDevice', + 'StopInstanceRequest', + 'Subnetwork', + 'SubnetworkAggregatedList', + 'SubnetworkList', + 'SubnetworkLogConfig', + 'SubnetworkSecondaryRange', + 'SubnetworksExpandIpCidrRangeRequest', + 'SubnetworksScopedList', + 'SubnetworksSetPrivateIpGoogleAccessRequest', + 'Subsetting', + 'SwitchToCustomModeNetworkRequest', + 'TCPHealthCheck', + 'Tags', + 'TargetGrpcProxy', + 'TargetGrpcProxyList', + 'TargetHttpProxiesScopedList', + 'TargetHttpProxy', + 'TargetHttpProxyAggregatedList', + 'TargetHttpProxyList', + 'TargetHttpsProxiesScopedList', + 'TargetHttpsProxiesSetQuicOverrideRequest', + 'TargetHttpsProxiesSetSslCertificatesRequest', + 'TargetHttpsProxy', + 'TargetHttpsProxyAggregatedList', + 'TargetHttpsProxyList', + 'TargetInstance', + 'TargetInstanceAggregatedList', + 'TargetInstanceList', + 'TargetInstancesScopedList', + 'TargetPool', + 'TargetPoolAggregatedList', + 'TargetPoolInstanceHealth', + 'TargetPoolList', + 'TargetPoolsAddHealthCheckRequest', + 'TargetPoolsAddInstanceRequest', + 'TargetPoolsRemoveHealthCheckRequest', + 'TargetPoolsRemoveInstanceRequest', + 'TargetPoolsScopedList', + 'TargetReference', + 'TargetSslProxiesSetBackendServiceRequest', + 'TargetSslProxiesSetProxyHeaderRequest', + 'TargetSslProxiesSetSslCertificatesRequest', + 'TargetSslProxy', + 'TargetSslProxyList', + 'TargetTcpProxiesSetBackendServiceRequest', + 'TargetTcpProxiesSetProxyHeaderRequest', + 'TargetTcpProxy', + 'TargetTcpProxyList', + 'TargetVpnGateway', + 'TargetVpnGatewayAggregatedList', + 'TargetVpnGatewayList', + 'TargetVpnGatewaysScopedList', + 'TestFailure', + 'TestIamPermissionsDiskRequest', + 'TestIamPermissionsExternalVpnGatewayRequest', + 'TestIamPermissionsFirewallPolicyRequest', + 'TestIamPermissionsImageRequest', + 
'TestIamPermissionsInstanceRequest', + 'TestIamPermissionsInstanceTemplateRequest', + 'TestIamPermissionsLicenseCodeRequest', + 'TestIamPermissionsLicenseRequest', + 'TestIamPermissionsNetworkEndpointGroupRequest', + 'TestIamPermissionsNodeGroupRequest', + 'TestIamPermissionsNodeTemplateRequest', + 'TestIamPermissionsPacketMirroringRequest', + 'TestIamPermissionsRegionDiskRequest', + 'TestIamPermissionsReservationRequest', + 'TestIamPermissionsResourcePolicyRequest', + 'TestIamPermissionsServiceAttachmentRequest', + 'TestIamPermissionsSnapshotRequest', + 'TestIamPermissionsSubnetworkRequest', + 'TestIamPermissionsVpnGatewayRequest', + 'TestPermissionsRequest', + 'TestPermissionsResponse', + 'Uint128', + 'UpdateAccessConfigInstanceRequest', + 'UpdateAutoscalerRequest', + 'UpdateBackendBucketRequest', + 'UpdateBackendServiceRequest', + 'UpdateDisplayDeviceInstanceRequest', + 'UpdateFirewallRequest', + 'UpdateHealthCheckRequest', + 'UpdateInstanceRequest', + 'UpdateNetworkInterfaceInstanceRequest', + 'UpdatePeeringNetworkRequest', + 'UpdatePerInstanceConfigsInstanceGroupManagerRequest', + 'UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest', + 'UpdateRegionAutoscalerRequest', + 'UpdateRegionBackendServiceRequest', + 'UpdateRegionHealthCheckRequest', + 'UpdateRegionUrlMapRequest', + 'UpdateRouterRequest', + 'UpdateShieldedInstanceConfigInstanceRequest', + 'UpdateUrlMapRequest', + 'UrlMap', + 'UrlMapList', + 'UrlMapReference', + 'UrlMapTest', + 'UrlMapTestHeader', + 'UrlMapValidationResult', + 'UrlMapsAggregatedList', + 'UrlMapsScopedList', + 'UrlMapsValidateRequest', + 'UrlMapsValidateResponse', + 'UrlRewrite', + 'UsableSubnetwork', + 'UsableSubnetworkSecondaryRange', + 'UsableSubnetworksAggregatedList', + 'UsageExportLocation', + 'ValidateRegionUrlMapRequest', + 'ValidateUrlMapRequest', + 'VmEndpointNatMappings', + 'VmEndpointNatMappingsInterfaceNatMappings', + 'VmEndpointNatMappingsList', + 'VpnGateway', + 'VpnGatewayAggregatedList', + 'VpnGatewayList', + 
        'VpnGatewayStatus',
        'VpnGatewayStatusHighAvailabilityRequirementState',
        'VpnGatewayStatusTunnel',
        'VpnGatewayStatusVpnConnection',
        'VpnGatewayVpnGatewayInterface',
        'VpnGatewaysGetStatusResponse',
        'VpnGatewaysScopedList',
        'VpnTunnel',
        'VpnTunnelAggregatedList',
        'VpnTunnelList',
        'VpnTunnelsScopedList',
        'WafExpressionSet',
        'WafExpressionSetExpression',
        'WaitGlobalOperationRequest',
        'WaitRegionOperationRequest',
        'WaitZoneOperationRequest',
        'Warning',
        'Warnings',
        'WeightedBackendService',
        'XpnHostList',
        'XpnResourceId',
        'Zone',
        'ZoneList',
        'ZoneSetLabelsRequest',
        'ZoneSetPolicyRequest',
    },
)


# NOTE(review): this file appears to be GAPIC/owl-bot generated output
# (owl-bot-staging path) — TODO confirm. The proto.Field tag numbers below
# are part of the Compute API wire format; regenerate rather than hand-edit.
class AbandonInstancesInstanceGroupManagerRequest(proto.Message):
    r"""A request message for InstanceGroupManagers.AbandonInstances.
    See the method description for details.

    Attributes:
        instance_group_manager (str):
            The name of the managed instance group.
        instance_group_managers_abandon_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersAbandonInstancesRequest):
            The body resource for this request
        project (str):
            Project ID for this request.
        request_id (str):
            An optional request ID to identify requests.
            Specify a unique request ID so that if you must
            retry your request, the server will know to
            ignore the request if it has already been
            completed. For example, consider a situation
            where you make an initial request and the
            request times out. If you make the request again
            with the same request ID, the server can check
            if original operation with the same request ID
            was received, and if so, will ignore the second
            request. This prevents clients from accidentally
            creating duplicate commitments. The request ID
            must be a valid UUID with the exception that
            zero UUID is not supported (
            00000000-0000-0000-0000-000000000000).

            This field is a member of `oneof`_ ``_request_id``.
        zone (str):
            The name of the zone where the managed
            instance group is located.
    """

    instance_group_manager = proto.Field(
        proto.STRING,
        number=249363395,
    )
    instance_group_managers_abandon_instances_request_resource = proto.Field(
        proto.MESSAGE,
        number=320929016,
        message='InstanceGroupManagersAbandonInstancesRequest',
    )
    project = proto.Field(
        proto.STRING,
        number=227560217,
    )
    # optional=True gives this proto3 field explicit presence, matching the
    # synthetic ``_request_id`` oneof mentioned in the docstring.
    request_id = proto.Field(
        proto.STRING,
        number=37109963,
        optional=True,
    )
    zone = proto.Field(
        proto.STRING,
        number=3744684,
    )


class AbandonInstancesRegionInstanceGroupManagerRequest(proto.Message):
    r"""A request message for
    RegionInstanceGroupManagers.AbandonInstances. See the method
    description for details.

    Attributes:
        instance_group_manager (str):
            Name of the managed instance group.
        project (str):
            Project ID for this request.
        region (str):
            Name of the region scoping this request.
        region_instance_group_managers_abandon_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersAbandonInstancesRequest):
            The body resource for this request
        request_id (str):
            An optional request ID to identify requests.
            Specify a unique request ID so that if you must
            retry your request, the server will know to
            ignore the request if it has already been
            completed. For example, consider a situation
            where you make an initial request and the
            request times out. If you make the request again
            with the same request ID, the server can check
            if original operation with the same request ID
            was received, and if so, will ignore the second
            request. This prevents clients from accidentally
            creating duplicate commitments. The request ID
            must be a valid UUID with the exception that
            zero UUID is not supported (
            00000000-0000-0000-0000-000000000000).

            This field is a member of `oneof`_ ``_request_id``.
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_managers_abandon_instances_request_resource = proto.Field( + proto.MESSAGE, + number=488499491, + message='RegionInstanceGroupManagersAbandonInstancesRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class AcceleratorConfig(proto.Message): + r"""A specification of the type and number of accelerator cards + attached to the instance. + + Attributes: + accelerator_count (int): + The number of the guest accelerator cards + exposed to this instance. + + This field is a member of `oneof`_ ``_accelerator_count``. + accelerator_type (str): + Full or partial URL of the accelerator type + resource to attach to this instance. For + example: projects/my-project/zones/us- + central1-c/acceleratorTypes/nvidia-tesla-p100 If + you are creating an instance template, specify + only the accelerator name. See GPUs on Compute + Engine for a full list of accelerator types. + + This field is a member of `oneof`_ ``_accelerator_type``. + """ + + accelerator_count = proto.Field( + proto.INT32, + number=504879675, + optional=True, + ) + accelerator_type = proto.Field( + proto.STRING, + number=138031246, + optional=True, + ) + + +class AcceleratorType(proto.Message): + r"""Represents an Accelerator Type resource. Google Cloud + Platform provides graphics processing units (accelerators) that + you can add to VM instances to improve or accelerate performance + when working with intensive workloads. For more information, + read GPUs on Compute Engine. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. 
        deprecated (google.cloud.compute_v1.types.DeprecationStatus):
            [Output Only] The deprecation status associated with this
            accelerator type.

            This field is a member of `oneof`_ ``_deprecated``.
        description (str):
            [Output Only] An optional textual description of the
            resource.

            This field is a member of `oneof`_ ``_description``.
        id (int):
            [Output Only] The unique identifier for the resource. This
            identifier is defined by the server.

            This field is a member of `oneof`_ ``_id``.
        kind (str):
            [Output Only] The type of the resource. Always
            compute#acceleratorType for accelerator types.

            This field is a member of `oneof`_ ``_kind``.
        maximum_cards_per_instance (int):
            [Output Only] Maximum number of accelerator cards allowed
            per instance.

            This field is a member of `oneof`_ ``_maximum_cards_per_instance``.
        name (str):
            [Output Only] Name of the resource.

            This field is a member of `oneof`_ ``_name``.
        self_link (str):
            [Output Only] Server-defined, fully qualified URL for this
            resource.

            This field is a member of `oneof`_ ``_self_link``.
        zone (str):
            [Output Only] The name of the zone where the accelerator
            type resides, such as us-central1-a. You must specify this
            field as part of the HTTP request URL. It is not settable as
            a field in the request body.

            This field is a member of `oneof`_ ``_zone``.
    """

    creation_timestamp = proto.Field(
        proto.STRING,
        number=30525366,
        optional=True,
    )
    deprecated = proto.Field(
        proto.MESSAGE,
        number=515138995,
        optional=True,
        message='DeprecationStatus',
    )
    description = proto.Field(
        proto.STRING,
        number=422937596,
        optional=True,
    )
    # Unlike the string ``id`` on the list messages in this file, the
    # resource ``id`` is numeric (UINT64), per the API surface.
    id = proto.Field(
        proto.UINT64,
        number=3355,
        optional=True,
    )
    kind = proto.Field(
        proto.STRING,
        number=3292052,
        optional=True,
    )
    maximum_cards_per_instance = proto.Field(
        proto.INT32,
        number=263814482,
        optional=True,
    )
    name = proto.Field(
        proto.STRING,
        number=3373707,
        optional=True,
    )
    self_link = proto.Field(
        proto.STRING,
        number=456214797,
        optional=True,
    )
    zone = proto.Field(
        proto.STRING,
        number=3744684,
        optional=True,
    )


class AcceleratorTypeAggregatedList(proto.Message):
    r"""

    Attributes:
        id (str):
            [Output Only] Unique identifier for the resource; defined by
            the server.

            This field is a member of `oneof`_ ``_id``.
        items (Sequence[google.cloud.compute_v1.types.AcceleratorTypeAggregatedList.ItemsEntry]):
            A list of AcceleratorTypesScopedList
            resources.
        kind (str):
            [Output Only] Type of resource. Always
            compute#acceleratorTypeAggregatedList for aggregated lists
            of accelerator types.

            This field is a member of `oneof`_ ``_kind``.
        next_page_token (str):
            [Output Only] This token allows you to get the next page of
            results for list requests. If the number of results is
            larger than maxResults, use the nextPageToken as a value for
            the query parameter pageToken in the next list request.
            Subsequent list requests will have their own nextPageToken
            to continue paging through the results.

            This field is a member of `oneof`_ ``_next_page_token``.
        self_link (str):
            [Output Only] Server-defined URL for this resource.

            This field is a member of `oneof`_ ``_self_link``.
        unreachables (Sequence[str]):
            [Output Only] Unreachable resources.
        warning (google.cloud.compute_v1.types.Warning):
            [Output Only] Informational warning message.

            This field is a member of `oneof`_ ``_warning``.
    """

    @property
    def raw_page(self):
        # NOTE(review): presumably consumed by the generated pager helpers,
        # which treat this response as a single page — confirm against
        # services/*/pagers.py.
        return self

    id = proto.Field(
        proto.STRING,
        number=3355,
        optional=True,
    )
    # Aggregated lists are keyed by scope name, hence a map (MapField)
    # rather than the repeated field used by plain list responses.
    items = proto.MapField(
        proto.STRING,
        proto.MESSAGE,
        number=100526016,
        message='AcceleratorTypesScopedList',
    )
    kind = proto.Field(
        proto.STRING,
        number=3292052,
        optional=True,
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=79797525,
        optional=True,
    )
    self_link = proto.Field(
        proto.STRING,
        number=456214797,
        optional=True,
    )
    unreachables = proto.RepeatedField(
        proto.STRING,
        number=243372063,
    )
    warning = proto.Field(
        proto.MESSAGE,
        number=50704284,
        optional=True,
        message='Warning',
    )


class AcceleratorTypeList(proto.Message):
    r"""Contains a list of accelerator types.

    Attributes:
        id (str):
            [Output Only] Unique identifier for the resource; defined by
            the server.

            This field is a member of `oneof`_ ``_id``.
        items (Sequence[google.cloud.compute_v1.types.AcceleratorType]):
            A list of AcceleratorType resources.
        kind (str):
            [Output Only] Type of resource. Always
            compute#acceleratorTypeList for lists of accelerator types.

            This field is a member of `oneof`_ ``_kind``.
        next_page_token (str):
            [Output Only] This token allows you to get the next page of
            results for list requests. If the number of results is
            larger than maxResults, use the nextPageToken as a value for
            the query parameter pageToken in the next list request.
            Subsequent list requests will have their own nextPageToken
            to continue paging through the results.

            This field is a member of `oneof`_ ``_next_page_token``.
        self_link (str):
            [Output Only] Server-defined URL for this resource.

            This field is a member of `oneof`_ ``_self_link``.
        warning (google.cloud.compute_v1.types.Warning):
            [Output Only] Informational warning message.

            This field is a member of `oneof`_ ``_warning``.
    """

    @property
    def raw_page(self):
        # NOTE(review): presumably consumed by the generated pager helpers,
        # which treat this response as a single page — confirm against
        # services/*/pagers.py.
        return self

    id = proto.Field(
        proto.STRING,
        number=3355,
        optional=True,
    )
    items = proto.RepeatedField(
        proto.MESSAGE,
        number=100526016,
        message='AcceleratorType',
    )
    kind = proto.Field(
        proto.STRING,
        number=3292052,
        optional=True,
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=79797525,
        optional=True,
    )
    self_link = proto.Field(
        proto.STRING,
        number=456214797,
        optional=True,
    )
    warning = proto.Field(
        proto.MESSAGE,
        number=50704284,
        optional=True,
        message='Warning',
    )


class AcceleratorTypesScopedList(proto.Message):
    r"""

    Attributes:
        accelerator_types (Sequence[google.cloud.compute_v1.types.AcceleratorType]):
            [Output Only] A list of accelerator types contained in this
            scope.
        warning (google.cloud.compute_v1.types.Warning):
            [Output Only] An informational warning that appears when the
            accelerator types list is empty.

            This field is a member of `oneof`_ ``_warning``.
    """

    accelerator_types = proto.RepeatedField(
        proto.MESSAGE,
        number=520872357,
        message='AcceleratorType',
    )
    warning = proto.Field(
        proto.MESSAGE,
        number=50704284,
        optional=True,
        message='Warning',
    )


class Accelerators(proto.Message):
    r"""

    Attributes:
        guest_accelerator_count (int):
            Number of accelerator cards exposed to the
            guest.

            This field is a member of `oneof`_ ``_guest_accelerator_count``.
        guest_accelerator_type (str):
            The accelerator type resource name, not a
            full URL, e.g. 'nvidia-tesla-k80'.

            This field is a member of `oneof`_ ``_guest_accelerator_type``.
+ """ + + guest_accelerator_count = proto.Field( + proto.INT32, + number=479079316, + optional=True, + ) + guest_accelerator_type = proto.Field( + proto.STRING, + number=293064725, + optional=True, + ) + + +class AccessConfig(proto.Message): + r"""An access configuration attached to an instance's network + interface. Only one access config per instance is supported. + + Attributes: + external_ipv6 (str): + [Output Only] The first IPv6 address of the external IPv6 + range associated with this instance, prefix length is stored + in externalIpv6PrefixLength in ipv6AccessConfig. The field + is output only, an IPv6 address from a subnetwork associated + with the instance will be allocated dynamically. + + This field is a member of `oneof`_ ``_external_ipv6``. + external_ipv6_prefix_length (int): + [Output Only] The prefix length of the external IPv6 range. + + This field is a member of `oneof`_ ``_external_ipv6_prefix_length``. + kind (str): + [Output Only] Type of the resource. Always + compute#accessConfig for access configs. + + This field is a member of `oneof`_ ``_kind``. + name (str): + The name of this access configuration. The + default and recommended name is External NAT, + but you can use any arbitrary string, such as My + external IP or Network Access. + + This field is a member of `oneof`_ ``_name``. + nat_i_p (str): + An external IP address associated with this + instance. Specify an unused static external IP + address available to the project or leave this + field undefined to use an IP from a shared + ephemeral IP address pool. If you specify a + static external IP address, it must live in the + same region as the zone of the instance. + + This field is a member of `oneof`_ ``_nat_i_p``. + network_tier (str): + This signifies the networking tier used for + configuring this access configuration and can + only take the following values: PREMIUM, + STANDARD. 
If an AccessConfig is specified + without a valid external IP address, an + ephemeral IP will be created with this + networkTier. If an AccessConfig with a valid + external IP address is specified, it must match + that of the networkTier associated with the + Address resource owning that IP. Check the + NetworkTier enum for the list of possible + values. + + This field is a member of `oneof`_ ``_network_tier``. + public_ptr_domain_name (str): + The DNS domain name for the public PTR record. You can set + this field only if the ``setPublicPtr`` field is enabled. + + This field is a member of `oneof`_ ``_public_ptr_domain_name``. + set_public_ptr (bool): + Specifies whether a public DNS 'PTR' record + should be created to map the external IP address + of the instance to a DNS domain name. + + This field is a member of `oneof`_ ``_set_public_ptr``. + type_ (str): + The type of configuration. The default and only option is + ONE_TO_ONE_NAT. Check the Type enum for the list of possible + values. + + This field is a member of `oneof`_ ``_type``. + """ + class NetworkTier(proto.Enum): + r"""This signifies the networking tier used for configuring this + access configuration and can only take the following values: + PREMIUM, STANDARD. If an AccessConfig is specified without a + valid external IP address, an ephemeral IP will be created with + this networkTier. If an AccessConfig with a valid external IP + address is specified, it must match that of the networkTier + associated with the Address resource owning that IP. + """ + UNDEFINED_NETWORK_TIER = 0 + PREMIUM = 399530551 + STANDARD = 484642493 + + class Type(proto.Enum): + r"""The type of configuration. The default and only option is + ONE_TO_ONE_NAT. 
+ """ + UNDEFINED_TYPE = 0 + DIRECT_IPV6 = 4397213 + ONE_TO_ONE_NAT = 84090205 + + external_ipv6 = proto.Field( + proto.STRING, + number=532703707, + optional=True, + ) + external_ipv6_prefix_length = proto.Field( + proto.INT32, + number=425672143, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + nat_i_p = proto.Field( + proto.STRING, + number=117634556, + optional=True, + ) + network_tier = proto.Field( + proto.STRING, + number=517397843, + optional=True, + ) + public_ptr_domain_name = proto.Field( + proto.STRING, + number=316599167, + optional=True, + ) + set_public_ptr = proto.Field( + proto.BOOL, + number=523870229, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class AddAccessConfigInstanceRequest(proto.Message): + r"""A request message for Instances.AddAccessConfig. See the + method description for details. + + Attributes: + access_config_resource (google.cloud.compute_v1.types.AccessConfig): + The body resource for this request + instance (str): + The instance name for this request. + network_interface (str): + The name of the network interface to add to + this instance. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + access_config_resource = proto.Field( + proto.MESSAGE, + number=387825552, + message='AccessConfig', + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + network_interface = proto.Field( + proto.STRING, + number=365387880, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class AddAssociationFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.AddAssociation. See + the method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + firewall_policy_association_resource (google.cloud.compute_v1.types.FirewallPolicyAssociation): + The body resource for this request + replace_existing_association (bool): + Indicates whether or not to replace it if an + association of the attachment already exists. + This is false by default, in which case an error + will be returned if an association already + exists. + + This field is a member of `oneof`_ ``_replace_existing_association``. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + firewall_policy_association_resource = proto.Field( + proto.MESSAGE, + number=259546170, + message='FirewallPolicyAssociation', + ) + replace_existing_association = proto.Field( + proto.BOOL, + number=209541240, + optional=True, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class AddHealthCheckTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.AddHealthCheck. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_pool (str): + Name of the target pool to add a health check + to. 
+ target_pools_add_health_check_request_resource (google.cloud.compute_v1.types.TargetPoolsAddHealthCheckRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + target_pools_add_health_check_request_resource = proto.Field( + proto.MESSAGE, + number=269573412, + message='TargetPoolsAddHealthCheckRequest', + ) + + +class AddInstanceTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.AddInstance. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_pool (str): + Name of the TargetPool resource to add + instances to. 
+ target_pools_add_instance_request_resource (google.cloud.compute_v1.types.TargetPoolsAddInstanceRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + target_pools_add_instance_request_resource = proto.Field( + proto.MESSAGE, + number=428796404, + message='TargetPoolsAddInstanceRequest', + ) + + +class AddInstancesInstanceGroupRequest(proto.Message): + r"""A request message for InstanceGroups.AddInstances. See the + method description for details. + + Attributes: + instance_group (str): + The name of the instance group where you are + adding instances. + instance_groups_add_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupsAddInstancesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the instance group + is located. 
+ """ + + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + instance_groups_add_instances_request_resource = proto.Field( + proto.MESSAGE, + number=453713246, + message='InstanceGroupsAddInstancesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class AddNodesNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.AddNodes. See the method + description for details. + + Attributes: + node_group (str): + Name of the NodeGroup resource. + node_groups_add_nodes_request_resource (google.cloud.compute_v1.types.NodeGroupsAddNodesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + node_group = proto.Field( + proto.STRING, + number=469958146, + ) + node_groups_add_nodes_request_resource = proto.Field( + proto.MESSAGE, + number=131263288, + message='NodeGroupsAddNodesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class AddPeeringNetworkRequest(proto.Message): + r"""A request message for Networks.AddPeering. See the method + description for details. + + Attributes: + network (str): + Name of the network resource to add peering + to. + networks_add_peering_request_resource (google.cloud.compute_v1.types.NetworksAddPeeringRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + network = proto.Field( + proto.STRING, + number=232872494, + ) + networks_add_peering_request_resource = proto.Field( + proto.MESSAGE, + number=388810421, + message='NetworksAddPeeringRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class AddResourcePoliciesDiskRequest(proto.Message): + r"""A request message for Disks.AddResourcePolicies. See the + method description for details. + + Attributes: + disk (str): + The disk name for this request. + disks_add_resource_policies_request_resource (google.cloud.compute_v1.types.DisksAddResourcePoliciesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + disks_add_resource_policies_request_resource = proto.Field( + proto.MESSAGE, + number=496483363, + message='DisksAddResourcePoliciesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class AddResourcePoliciesInstanceRequest(proto.Message): + r"""A request message for Instances.AddResourcePolicies. See the + method description for details. + + Attributes: + instance (str): + The instance name for this request. + instances_add_resource_policies_request_resource (google.cloud.compute_v1.types.InstancesAddResourcePoliciesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_add_resource_policies_request_resource = proto.Field( + proto.MESSAGE, + number=489351963, + message='InstancesAddResourcePoliciesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class AddResourcePoliciesRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.AddResourcePolicies. See + the method description for details. + + Attributes: + disk (str): + The disk name for this request. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + region_disks_add_resource_policies_request_resource (google.cloud.compute_v1.types.RegionDisksAddResourcePoliciesRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_disks_add_resource_policies_request_resource = proto.Field( + proto.MESSAGE, + number=284196750, + message='RegionDisksAddResourcePoliciesRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class AddRuleFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.AddRule. See the + method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + firewall_policy_rule_resource (google.cloud.compute_v1.types.FirewallPolicyRule): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + firewall_policy_rule_resource = proto.Field( + proto.MESSAGE, + number=250523523, + message='FirewallPolicyRule', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class AddRuleSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.AddRule. 
See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + security_policy (str): + Name of the security policy to update. + security_policy_rule_resource (google.cloud.compute_v1.types.SecurityPolicyRule): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + security_policy = proto.Field( + proto.STRING, + number=171082513, + ) + security_policy_rule_resource = proto.Field( + proto.MESSAGE, + number=402693443, + message='SecurityPolicyRule', + ) + + +class AddSignedUrlKeyBackendBucketRequest(proto.Message): + r"""A request message for BackendBuckets.AddSignedUrlKey. See the + method description for details. + + Attributes: + backend_bucket (str): + Name of the BackendBucket resource to which + the Signed URL Key should be added. The name + should conform to RFC1035. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ signed_url_key_resource (google.cloud.compute_v1.types.SignedUrlKey): + The body resource for this request + """ + + backend_bucket = proto.Field( + proto.STRING, + number=91714037, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + signed_url_key_resource = proto.Field( + proto.MESSAGE, + number=457625985, + message='SignedUrlKey', + ) + + +class AddSignedUrlKeyBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.AddSignedUrlKey. See + the method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to which + the Signed URL Key should be added. The name + should conform to RFC1035. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ signed_url_key_resource (google.cloud.compute_v1.types.SignedUrlKey): + The body resource for this request + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + signed_url_key_resource = proto.Field( + proto.MESSAGE, + number=457625985, + message='SignedUrlKey', + ) + + +class Address(proto.Message): + r"""Represents an IP Address resource. Google Compute Engine has two IP + Address resources: \* `Global (external and + internal) `__ + \* `Regional (external and + internal) `__ + For more information, see Reserving a static external IP address. + + Attributes: + address (str): + The static IP address represented by this + resource. + + This field is a member of `oneof`_ ``_address``. + address_type (str): + The type of address to reserve, either + INTERNAL or EXTERNAL. If unspecified, defaults + to EXTERNAL. Check the AddressType enum for the + list of possible values. + + This field is a member of `oneof`_ ``_address_type``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this field when you create the resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + ip_version (str): + The IP version that will be used by this + address. Valid options are IPV4 or IPV6. This + can only be specified for a global address. + Check the IpVersion enum for the list of + possible values. + + This field is a member of `oneof`_ ``_ip_version``. + kind (str): + [Output Only] Type of the resource. Always compute#address + for addresses. 
+ + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?``. The first character must be + a lowercase letter, and all following characters (except for + the last character) must be a dash, lowercase letter, or + digit. The last character must be a lowercase letter or + digit. + + This field is a member of `oneof`_ ``_name``. + network (str): + The URL of the network in which to reserve the address. This + field can only be used with INTERNAL type with the + VPC_PEERING purpose. + + This field is a member of `oneof`_ ``_network``. + network_tier (str): + This signifies the networking tier used for + configuring this address and can only take the + following values: PREMIUM or STANDARD. Internal + IP addresses are always Premium Tier; global + external IP addresses are always Premium Tier; + regional external IP addresses can be either + Standard or Premium Tier. If this field is not + specified, it is assumed to be PREMIUM. Check + the NetworkTier enum for the list of possible + values. + + This field is a member of `oneof`_ ``_network_tier``. + prefix_length (int): + The prefix length if the resource represents + an IP range. + + This field is a member of `oneof`_ ``_prefix_length``. + purpose (str): + The purpose of this resource, which can be one of the + following values: - GCE_ENDPOINT for addresses that are used + by VM instances, alias IP ranges, load balancers, and + similar resources. - DNS_RESOLVER for a DNS resolver address + in a subnetwork for a Cloud DNS inbound forwarder IP + addresses (regional internal IP address in a subnet of a VPC + network) - VPC_PEERING for global internal IP addresses used + for private services access allocated ranges. 
- NAT_AUTO for + the regional external IP addresses used by Cloud NAT when + allocating addresses using . - IPSEC_INTERCONNECT for + addresses created from a private IP range that are reserved + for a VLAN attachment in an *IPsec-encrypted Cloud + Interconnect* configuration. These addresses are regional + resources. Not currently available publicly. - + ``SHARED_LOADBALANCER_VIP`` for an internal IP address that + is assigned to multiple internal forwarding rules. - + ``PRIVATE_SERVICE_CONNECT`` for a private network address + that is used to configure Private Service Connect. Only + global internal addresses can use this purpose. Check the + Purpose enum for the list of possible values. + + This field is a member of `oneof`_ ``_purpose``. + region (str): + [Output Only] The URL of the region where a regional address + resides. For regional addresses, you must specify the region + as a path parameter in the HTTP request URL. *This field is + not applicable to global addresses.* + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + status (str): + [Output Only] The status of the address, which can be one of + RESERVING, RESERVED, or IN_USE. An address that is RESERVING + is currently in the process of being reserved. A RESERVED + address is currently reserved and available to use. An + IN_USE address is currently being used by another resource + and is not available. Check the Status enum for the list of + possible values. + + This field is a member of `oneof`_ ``_status``. + subnetwork (str): + The URL of the subnetwork in which to reserve the address. + If an IP address is specified, it must be within the + subnetwork's IP range. This field can only be used with + INTERNAL type with a GCE_ENDPOINT or DNS_RESOLVER purpose. + + This field is a member of `oneof`_ ``_subnetwork``. 
+ users (Sequence[str]): + [Output Only] The URLs of the resources that are using this + address. + """ + class AddressType(proto.Enum): + r"""The type of address to reserve, either INTERNAL or EXTERNAL. + If unspecified, defaults to EXTERNAL. + """ + UNDEFINED_ADDRESS_TYPE = 0 + EXTERNAL = 35607499 + INTERNAL = 279295677 + UNSPECIFIED_TYPE = 53933922 + + class IpVersion(proto.Enum): + r"""The IP version that will be used by this address. Valid + options are IPV4 or IPV6. This can only be specified for a + global address. + """ + UNDEFINED_IP_VERSION = 0 + IPV4 = 2254341 + IPV6 = 2254343 + UNSPECIFIED_VERSION = 21850000 + + class NetworkTier(proto.Enum): + r"""This signifies the networking tier used for configuring this + address and can only take the following values: PREMIUM or + STANDARD. Internal IP addresses are always Premium Tier; global + external IP addresses are always Premium Tier; regional external + IP addresses can be either Standard or Premium Tier. If this + field is not specified, it is assumed to be PREMIUM. + """ + UNDEFINED_NETWORK_TIER = 0 + PREMIUM = 399530551 + STANDARD = 484642493 + + class Purpose(proto.Enum): + r"""The purpose of this resource, which can be one of the following + values: - GCE_ENDPOINT for addresses that are used by VM instances, + alias IP ranges, load balancers, and similar resources. - + DNS_RESOLVER for a DNS resolver address in a subnetwork for a Cloud + DNS inbound forwarder IP addresses (regional internal IP address in + a subnet of a VPC network) - VPC_PEERING for global internal IP + addresses used for private services access allocated ranges. - + NAT_AUTO for the regional external IP addresses used by Cloud NAT + when allocating addresses using . - IPSEC_INTERCONNECT for addresses + created from a private IP range that are reserved for a VLAN + attachment in an *IPsec-encrypted Cloud Interconnect* configuration. + These addresses are regional resources. Not currently available + publicly. 
- ``SHARED_LOADBALANCER_VIP`` for an internal IP address + that is assigned to multiple internal forwarding rules. - + ``PRIVATE_SERVICE_CONNECT`` for a private network address that is + used to configure Private Service Connect. Only global internal + addresses can use this purpose. + """ + UNDEFINED_PURPOSE = 0 + DNS_RESOLVER = 476114556 + GCE_ENDPOINT = 230515243 + IPSEC_INTERCONNECT = 340437251 + NAT_AUTO = 163666477 + PRIVATE_SERVICE_CONNECT = 48134724 + SHARED_LOADBALANCER_VIP = 294447572 + VPC_PEERING = 400800170 + + class Status(proto.Enum): + r"""[Output Only] The status of the address, which can be one of + RESERVING, RESERVED, or IN_USE. An address that is RESERVING is + currently in the process of being reserved. A RESERVED address is + currently reserved and available to use. An IN_USE address is + currently being used by another resource and is not available. + """ + UNDEFINED_STATUS = 0 + IN_USE = 17393485 + RESERVED = 432241448 + RESERVING = 514587225 + + address = proto.Field( + proto.STRING, + number=462920692, + optional=True, + ) + address_type = proto.Field( + proto.STRING, + number=264307877, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + ip_version = proto.Field( + proto.STRING, + number=294959552, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + network_tier = proto.Field( + proto.STRING, + number=517397843, + optional=True, + ) + prefix_length = proto.Field( + proto.INT32, + number=453565747, + optional=True, + ) + purpose = proto.Field( + proto.STRING, + number=316407070, + optional=True, + ) + region = 
proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + optional=True, + ) + users = proto.RepeatedField( + proto.STRING, + number=111578632, + ) + + +class AddressAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.AddressAggregatedList.ItemsEntry]): + A list of AddressesScopedList resources. + kind (str): + [Output Only] Type of resource. Always + compute#addressAggregatedList for aggregated lists of + addresses. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='AddressesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class AddressList(proto.Message): + r"""Contains a list of addresses. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Address]): + A list of Address resources. + kind (str): + [Output Only] Type of resource. Always compute#addressList + for lists of addresses. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Address', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class AddressesScopedList(proto.Message): + r""" + + Attributes: + addresses (Sequence[google.cloud.compute_v1.types.Address]): + [Output Only] A list of addresses contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of addresses when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + addresses = proto.RepeatedField( + proto.MESSAGE, + number=337673122, + message='Address', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class AdvancedMachineFeatures(proto.Message): + r"""Specifies options for controlling advanced machine features. + Options that would traditionally be configured in a BIOS belong + here. Features that require operating system support may have + corresponding entries in the GuestOsFeatures of an Image (e.g., + whether or not the OS in the Image supports nested + virtualization being enabled or disabled). + + Attributes: + enable_nested_virtualization (bool): + Whether to enable nested virtualization or + not (default is false). + + This field is a member of `oneof`_ ``_enable_nested_virtualization``. + threads_per_core (int): + The number of threads per physical core. To + disable simultaneous multithreading (SMT) set + this to 1. 
If unset, the maximum number of + threads supported per core by the underlying + processor is assumed. + + This field is a member of `oneof`_ ``_threads_per_core``. + """ + + enable_nested_virtualization = proto.Field( + proto.BOOL, + number=16639365, + optional=True, + ) + threads_per_core = proto.Field( + proto.INT32, + number=352611671, + optional=True, + ) + + +class AggregatedListAcceleratorTypesRequest(proto.Message): + r"""A request message for AcceleratorTypes.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. 
For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListAddressesRequest(proto.Message): + r"""A request message for Addresses.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListAutoscalersRequest(proto.Message): + r"""A request message for Autoscalers.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. 
+ + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListBackendServicesRequest(proto.Message): + r"""A request message for BackendServices.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. 
This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListDiskTypesRequest(proto.Message): + r"""A request message for DiskTypes.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. 
By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListDisksRequest(proto.Message): + r"""A request message for Disks.AggregatedList. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. 
The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListForwardingRulesRequest(proto.Message): + r"""A request message for ForwardingRules.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListGlobalOperationsRequest(proto.Message): + r"""A request message for GlobalOperations.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. 
+ + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListHealthChecksRequest(proto.Message): + r"""A request message for HealthChecks.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. 
This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListInstanceGroupManagersRequest(proto.Message): + r"""A request message for InstanceGroupManagers.AggregatedList. + See the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. 
By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListInstanceGroupsRequest(proto.Message): + r"""A request message for InstanceGroups.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. 
The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListInstancesRequest(proto.Message): + r"""A request message for Instances.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListInterconnectAttachmentsRequest(proto.Message): + r"""A request message for InterconnectAttachments.AggregatedList. + See the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. 
+ + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListMachineTypesRequest(proto.Message): + r"""A request message for MachineTypes.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. 
This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListNetworkEndpointGroupsRequest(proto.Message): + r"""A request message for NetworkEndpointGroups.AggregatedList. + See the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. 
By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListNodeGroupsRequest(proto.Message): + r"""A request message for NodeGroups.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. 
The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListNodeTemplatesRequest(proto.Message): + r"""A request message for NodeTemplates.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListNodeTypesRequest(proto.Message): + r"""A request message for NodeTypes.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. 
+ + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListPacketMirroringsRequest(proto.Message): + r"""A request message for PacketMirrorings.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. 
This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListPublicDelegatedPrefixesRequest(proto.Message): + r"""A request message for PublicDelegatedPrefixes.AggregatedList. + See the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. 
By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListRegionCommitmentsRequest(proto.Message): + r"""A request message for RegionCommitments.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. 
The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListReservationsRequest(proto.Message): + r"""A request message for Reservations.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListResourcePoliciesRequest(proto.Message): + r"""A request message for ResourcePolicies.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. 
+ + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListRoutersRequest(proto.Message): + r"""A request message for Routers.AggregatedList. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. 
You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). 
Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListServiceAttachmentsRequest(proto.Message): + r"""A request message for ServiceAttachments.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. 
By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListSslCertificatesRequest(proto.Message): + r"""A request message for SslCertificates.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. 
The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListSubnetworksRequest(proto.Message): + r"""A request message for Subnetworks.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListTargetHttpProxiesRequest(proto.Message): + r"""A request message for TargetHttpProxies.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. 
+ + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListTargetHttpsProxiesRequest(proto.Message): + r"""A request message for TargetHttpsProxies.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. 
This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListTargetInstancesRequest(proto.Message): + r"""A request message for TargetInstances.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. 
By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListTargetPoolsRequest(proto.Message): + r"""A request message for TargetPools.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. 
The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListTargetVpnGatewaysRequest(proto.Message): + r"""A request message for TargetVpnGateways.AggregatedList. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListUrlMapsRequest(proto.Message): + r"""A request message for UrlMaps.AggregatedList. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. 
+ + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Name of the project scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListVpnGatewaysRequest(proto.Message): + r"""A request message for VpnGateways.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. 
This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AggregatedListVpnTunnelsRequest(proto.Message): + r"""A request message for VpnTunnels.AggregatedList. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + include_all_scopes (bool): + Indicates whether every visible scope for + each scope type (zone, region, global) should be + included in the response. For new resource types + added after this field, the flag has no effect + as new resource types will always include every + visible scope for each scope type in response. + For resource types which predate this field, if + this flag is omitted or false, only scopes of + the scope types where the resource type is + expected to be found will be included. + + This field is a member of `oneof`_ ``_include_all_scopes``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. 
By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + include_all_scopes = proto.Field( + proto.BOOL, + number=391327988, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class AliasIpRange(proto.Message): + r"""An alias IP range attached to an instance's network + interface. + + Attributes: + ip_cidr_range (str): + The IP alias ranges to allocate for this + interface. 
This IP CIDR range must belong to the + specified subnetwork and cannot contain IP + addresses reserved by system or used by other + network interfaces. This range may be a single + IP address (such as 10.2.3.4), a netmask (such + as /24) or a CIDR-formatted string (such as + 10.1.2.0/24). + + This field is a member of `oneof`_ ``_ip_cidr_range``. + subnetwork_range_name (str): + The name of a subnetwork secondary IP range + from which to allocate an IP alias range. If not + specified, the primary range of the subnetwork + is used. + + This field is a member of `oneof`_ ``_subnetwork_range_name``. + """ + + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + subnetwork_range_name = proto.Field( + proto.STRING, + number=387995966, + optional=True, + ) + + +class AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk(proto.Message): + r""" + + Attributes: + disk_size_gb (int): + Specifies the size of the disk in base-2 GB. + + This field is a member of `oneof`_ ``_disk_size_gb``. + interface (str): + Specifies the disk interface to use for + attaching this disk, which is either SCSI or + NVME. The default is SCSI. For performance + characteristics of SCSI over NVMe, see Local SSD + performance. Check the Interface enum for the + list of possible values. + + This field is a member of `oneof`_ ``_interface``. + """ + class Interface(proto.Enum): + r"""Specifies the disk interface to use for attaching this disk, + which is either SCSI or NVME. The default is SCSI. For + performance characteristics of SCSI over NVMe, see Local SSD + performance. + """ + UNDEFINED_INTERFACE = 0 + NVME = 2408800 + SCSI = 2539686 + + disk_size_gb = proto.Field( + proto.INT64, + number=316263735, + optional=True, + ) + interface = proto.Field( + proto.STRING, + number=502623545, + optional=True, + ) + + +class AllocationSpecificSKUAllocationReservedInstanceProperties(proto.Message): + r"""Properties of the SKU instances being reserved. 
Next ID: 9 + + Attributes: + guest_accelerators (Sequence[google.cloud.compute_v1.types.AcceleratorConfig]): + Specifies accelerator type and count. + local_ssds (Sequence[google.cloud.compute_v1.types.AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk]): + Specifies amount of local ssd to reserve with + each instance. The type of disk is local-ssd. + location_hint (str): + An opaque location hint used to place the + allocation close to other resources. This field + is for use by internal tools that use the public + API. + + This field is a member of `oneof`_ ``_location_hint``. + machine_type (str): + Specifies type of machine (name only) which has fixed number + of vCPUs and fixed amount of memory. This also includes + specifying custom machine type following + custom-NUMBER_OF_CPUS-AMOUNT_OF_MEMORY pattern. + + This field is a member of `oneof`_ ``_machine_type``. + min_cpu_platform (str): + Minimum cpu platform the reservation. + + This field is a member of `oneof`_ ``_min_cpu_platform``. + """ + + guest_accelerators = proto.RepeatedField( + proto.MESSAGE, + number=463595119, + message='AcceleratorConfig', + ) + local_ssds = proto.RepeatedField( + proto.MESSAGE, + number=229951299, + message='AllocationSpecificSKUAllocationAllocatedInstancePropertiesReservedDisk', + ) + location_hint = proto.Field( + proto.STRING, + number=350519505, + optional=True, + ) + machine_type = proto.Field( + proto.STRING, + number=227711026, + optional=True, + ) + min_cpu_platform = proto.Field( + proto.STRING, + number=242912759, + optional=True, + ) + + +class AllocationSpecificSKUReservation(proto.Message): + r"""This reservation type allows to pre allocate specific + instance configuration. Next ID: 5 + + Attributes: + count (int): + Specifies the number of resources that are + allocated. + + This field is a member of `oneof`_ ``_count``. + in_use_count (int): + [Output Only] Indicates how many instances are in use. 
+ + This field is a member of `oneof`_ ``_in_use_count``. + instance_properties (google.cloud.compute_v1.types.AllocationSpecificSKUAllocationReservedInstanceProperties): + The instance properties for the reservation. + + This field is a member of `oneof`_ ``_instance_properties``. + """ + + count = proto.Field( + proto.INT64, + number=94851343, + optional=True, + ) + in_use_count = proto.Field( + proto.INT64, + number=493458877, + optional=True, + ) + instance_properties = proto.Field( + proto.MESSAGE, + number=215355165, + optional=True, + message='AllocationSpecificSKUAllocationReservedInstanceProperties', + ) + + +class Allowed(proto.Message): + r""" + + Attributes: + I_p_protocol (str): + The IP protocol to which this rule applies. + The protocol type is required when creating a + firewall rule. This value can either be one of + the following well known protocol strings (tcp, + udp, icmp, esp, ah, ipip, sctp) or the IP + protocol number. + + This field is a member of `oneof`_ ``_I_p_protocol``. + ports (Sequence[str]): + An optional list of ports to which this rule applies. This + field is only applicable for the UDP or TCP protocol. Each + entry must be either an integer or a range. If not + specified, this rule applies to connections through any + port. Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + """ + + I_p_protocol = proto.Field( + proto.STRING, + number=488094525, + optional=True, + ) + ports = proto.RepeatedField( + proto.STRING, + number=106854418, + ) + + +class ApplyUpdatesToInstancesInstanceGroupManagerRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.ApplyUpdatesToInstances. See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group, + should conform to RFC1035. 
+ instance_group_managers_apply_updates_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersApplyUpdatesRequest): + The body resource for this request + project (str): + Project ID for this request. + zone (str): + The name of the zone where the managed + instance group is located. Should conform to + RFC1035. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_apply_updates_request_resource = proto.Field( + proto.MESSAGE, + number=259242835, + message='InstanceGroupManagersApplyUpdatesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.ApplyUpdatesToInstances. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group, + should conform to RFC1035. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request, + should conform to RFC1035. + region_instance_group_managers_apply_updates_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersApplyUpdatesRequest): + The body resource for this request + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_managers_apply_updates_request_resource = proto.Field( + proto.MESSAGE, + number=76248318, + message='RegionInstanceGroupManagersApplyUpdatesRequest', + ) + + +class AttachDiskInstanceRequest(proto.Message): + r"""A request message for Instances.AttachDisk. See the method + description for details. 
+ + Attributes: + attached_disk_resource (google.cloud.compute_v1.types.AttachedDisk): + The body resource for this request + force_attach (bool): + Whether to force attach the regional disk + even if it's currently attached to another + instance. If you try to force attach a zonal + disk to an instance, you will receive an error. + + This field is a member of `oneof`_ ``_force_attach``. + instance (str): + The instance name for this request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + attached_disk_resource = proto.Field( + proto.MESSAGE, + number=90605845, + message='AttachedDisk', + ) + force_attach = proto.Field( + proto.BOOL, + number=142758425, + optional=True, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest(proto.Message): + r"""A request message for + GlobalNetworkEndpointGroups.AttachNetworkEndpoints. 
See the + method description for details. + + Attributes: + global_network_endpoint_groups_attach_endpoints_request_resource (google.cloud.compute_v1.types.GlobalNetworkEndpointGroupsAttachEndpointsRequest): + The body resource for this request + network_endpoint_group (str): + The name of the network endpoint group where + you are attaching network endpoints to. It + should comply with RFC1035. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + global_network_endpoint_groups_attach_endpoints_request_resource = proto.Field( + proto.MESSAGE, + number=30691563, + message='GlobalNetworkEndpointGroupsAttachEndpointsRequest', + ) + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class AttachNetworkEndpointsNetworkEndpointGroupRequest(proto.Message): + r"""A request message for + NetworkEndpointGroups.AttachNetworkEndpoints. See the method + description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group where + you are attaching network endpoints to. 
It + should comply with RFC1035. + network_endpoint_groups_attach_endpoints_request_resource (google.cloud.compute_v1.types.NetworkEndpointGroupsAttachEndpointsRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the network + endpoint group is located. It should comply with + RFC1035. + """ + + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + network_endpoint_groups_attach_endpoints_request_resource = proto.Field( + proto.MESSAGE, + number=531079, + message='NetworkEndpointGroupsAttachEndpointsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class AttachedDisk(proto.Message): + r"""An instance-attached disk resource. + + Attributes: + auto_delete (bool): + Specifies whether the disk will be auto- + eleted when the instance is deleted (but not + when the disk is detached from the instance). + + This field is a member of `oneof`_ ``_auto_delete``. + boot (bool): + Indicates that this is a boot disk. 
The + virtual machine will use the first partition of + the disk for its root filesystem. + + This field is a member of `oneof`_ ``_boot``. + device_name (str): + Specifies a unique device name of your choice that is + reflected into the /dev/disk/by-id/google-\* tree of a Linux + operating system running within the instance. This name can + be used to reference the device for mounting, resizing, and + so on, from within the instance. If not specified, the + server chooses a default device name to apply to this disk, + in the form persistent-disk-x, where x is a number assigned + by Google Compute Engine. This field is only applicable for + persistent disks. + + This field is a member of `oneof`_ ``_device_name``. + disk_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + Encrypts or decrypts a disk using a customer- + supplied encryption key. If you are creating a + new disk, this field encrypts the new disk using + an encryption key that you provide. If you are + attaching an existing disk that is already + encrypted, this field decrypts the disk using + the customer-supplied encryption key. If you + encrypt a disk using a customer-supplied key, + you must provide the same key again when you + attempt to use this resource at a later time. + For example, you must provide the key when you + create a snapshot or an image from the disk or + when you attach the disk to a virtual machine + instance. If you do not provide an encryption + key, then the disk will be encrypted using an + automatically generated key and you do not need + to provide a key to use the disk later. Instance + templates do not store customer-supplied + encryption keys, so you cannot use your own keys + to encrypt disks in a managed instance group. + + This field is a member of `oneof`_ ``_disk_encryption_key``. + disk_size_gb (int): + The size of the disk in GB. + + This field is a member of `oneof`_ ``_disk_size_gb``. 
+ guest_os_features (Sequence[google.cloud.compute_v1.types.GuestOsFeature]): + A list of features to enable on the guest + operating system. Applicable only for bootable + images. Read Enabling guest operating system + features to see a list of available options. + index (int): + [Output Only] A zero-based index to this disk, where 0 is + reserved for the boot disk. If you have many disks attached + to an instance, each disk would have a unique index number. + + This field is a member of `oneof`_ ``_index``. + initialize_params (google.cloud.compute_v1.types.AttachedDiskInitializeParams): + [Input Only] Specifies the parameters for a new disk that + will be created alongside the new instance. Use + initialization parameters to create boot disks or local SSDs + attached to the new instance. This property is mutually + exclusive with the source property; you can only define one + or the other, but not both. + + This field is a member of `oneof`_ ``_initialize_params``. + interface (str): + Specifies the disk interface to use for + attaching this disk, which is either SCSI or + NVME. The default is SCSI. Persistent disks must + always use SCSI and the request will fail if you + attempt to attach a persistent disk in any other + format than SCSI. Local SSDs can use either NVME + or SCSI. For performance characteristics of SCSI + over NVMe, see Local SSD performance. Check the + Interface enum for the list of possible values. + + This field is a member of `oneof`_ ``_interface``. + kind (str): + [Output Only] Type of the resource. Always + compute#attachedDisk for attached disks. + + This field is a member of `oneof`_ ``_kind``. + licenses (Sequence[str]): + [Output Only] Any valid publicly visible licenses. + mode (str): + The mode in which to attach this disk, either READ_WRITE or + READ_ONLY. If not specified, the default is to attach the + disk in READ_WRITE mode. Check the Mode enum for the list of + possible values. 
+ + This field is a member of `oneof`_ ``_mode``. + shielded_instance_initial_state (google.cloud.compute_v1.types.InitialStateConfig): + [Output Only] shielded vm initial state stored on disk + + This field is a member of `oneof`_ ``_shielded_instance_initial_state``. + source (str): + Specifies a valid partial or full URL to an + existing Persistent Disk resource. When creating + a new instance, one of + initializeParams.sourceImage or + initializeParams.sourceSnapshot or disks.source + is required except for local SSD. If desired, + you can also attach existing non-root persistent + disks using this property. This field is only + applicable for persistent disks. Note that for + InstanceTemplate, specify the disk name, not the + URL for the disk. + + This field is a member of `oneof`_ ``_source``. + type_ (str): + Specifies the type of the disk, either + SCRATCH or PERSISTENT. If not specified, the + default is PERSISTENT. Check the Type enum for + the list of possible values. + + This field is a member of `oneof`_ ``_type``. + """ + class Interface(proto.Enum): + r"""Specifies the disk interface to use for attaching this disk, + which is either SCSI or NVME. The default is SCSI. Persistent + disks must always use SCSI and the request will fail if you + attempt to attach a persistent disk in any other format than + SCSI. Local SSDs can use either NVME or SCSI. For performance + characteristics of SCSI over NVMe, see Local SSD performance. + """ + UNDEFINED_INTERFACE = 0 + NVME = 2408800 + SCSI = 2539686 + + class Mode(proto.Enum): + r"""The mode in which to attach this disk, either READ_WRITE or + READ_ONLY. If not specified, the default is to attach the disk in + READ_WRITE mode. + """ + UNDEFINED_MODE = 0 + READ_ONLY = 91950261 + READ_WRITE = 173607894 + + class Type(proto.Enum): + r"""Specifies the type of the disk, either SCRATCH or PERSISTENT. + If not specified, the default is PERSISTENT. 
+ """ + UNDEFINED_TYPE = 0 + PERSISTENT = 460683927 + SCRATCH = 496778970 + + auto_delete = proto.Field( + proto.BOOL, + number=464761403, + optional=True, + ) + boot = proto.Field( + proto.BOOL, + number=3029746, + optional=True, + ) + device_name = proto.Field( + proto.STRING, + number=67541716, + optional=True, + ) + disk_encryption_key = proto.Field( + proto.MESSAGE, + number=271660677, + optional=True, + message='CustomerEncryptionKey', + ) + disk_size_gb = proto.Field( + proto.INT64, + number=316263735, + optional=True, + ) + guest_os_features = proto.RepeatedField( + proto.MESSAGE, + number=79294545, + message='GuestOsFeature', + ) + index = proto.Field( + proto.INT32, + number=100346066, + optional=True, + ) + initialize_params = proto.Field( + proto.MESSAGE, + number=17697045, + optional=True, + message='AttachedDiskInitializeParams', + ) + interface = proto.Field( + proto.STRING, + number=502623545, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + licenses = proto.RepeatedField( + proto.STRING, + number=337642578, + ) + mode = proto.Field( + proto.STRING, + number=3357091, + optional=True, + ) + shielded_instance_initial_state = proto.Field( + proto.MESSAGE, + number=192356867, + optional=True, + message='InitialStateConfig', + ) + source = proto.Field( + proto.STRING, + number=177235995, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class AttachedDiskInitializeParams(proto.Message): + r"""[Input Only] Specifies the parameters for a new disk that will be + created alongside the new instance. Use initialization parameters to + create boot disks or local SSDs attached to the new instance. This + property is mutually exclusive with the source property; you can + only define one or the other, but not both. + + Attributes: + description (str): + An optional description. Provide this + property when creating the disk. 
+ + This field is a member of `oneof`_ ``_description``. + disk_name (str): + Specifies the disk name. If not specified, + the default is to use the name of the instance. + If a disk with the same name already exists in + the given region, the existing disk is attached + to the new instance and the new disk is not + created. + + This field is a member of `oneof`_ ``_disk_name``. + disk_size_gb (int): + Specifies the size of the disk in base-2 GB. + The size must be at least 10 GB. If you specify + a sourceImage, which is required for boot disks, + the default size is the size of the sourceImage. + If you do not specify a sourceImage, the default + disk size is 500 GB. + + This field is a member of `oneof`_ ``_disk_size_gb``. + disk_type (str): + Specifies the disk type to use to create the + instance. If not specified, the default is pd- + standard, specified using the full URL. For + example: + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /diskTypes/pd-standard For a full list of + acceptable values, see Persistent disk types. If + you define this field, you can provide either + the full or partial URL. For example, the + following are valid values: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /diskTypes/diskType - + projects/project/zones/zone/diskTypes/diskType - + zones/zone/diskTypes/diskType Note that for + InstanceTemplate, this is the name of the disk + type, not URL. + + This field is a member of `oneof`_ ``_disk_type``. + labels (Sequence[google.cloud.compute_v1.types.AttachedDiskInitializeParams.LabelsEntry]): + Labels to apply to this disk. These can be + later modified by the disks.setLabels method. + This field is only applicable for persistent + disks. + on_update_action (str): + Specifies which action to take on instance + update with this disk. Default is to use the + existing disk. Check the OnUpdateAction enum for + the list of possible values. 
+ + This field is a member of `oneof`_ ``_on_update_action``. + provisioned_iops (int): + Indicates how many IOPS to provision for the + disk. This sets the number of I/O operations per + second that the disk can handle. Values must be + between 10,000 and 120,000. For more details, + see the Extreme persistent disk documentation. + + This field is a member of `oneof`_ ``_provisioned_iops``. + resource_policies (Sequence[str]): + Resource policies applied to this disk for + automatic snapshot creations. Specified using + the full or partial URL. For instance template, + specify only the resource policy name. + source_image (str): + The source image to create this disk. When + creating a new instance, one of + initializeParams.sourceImage or + initializeParams.sourceSnapshot or disks.source + is required except for local SSD. To create a + disk with one of the public operating system + images, specify the image by its family name. + For example, specify family/debian-9 to use the + latest Debian 9 image: projects/debian- + cloud/global/images/family/debian-9 + Alternatively, use a specific version of a + public operating system image: projects/debian- + cloud/global/images/debian-9-stretch-vYYYYMMDD + To create a disk with a custom image that you + created, specify the image name in the following + format: global/images/my-custom-image You can + also specify a custom image by its image family, + which returns the latest version of the image in + that family. Replace the image name with + family/family-name: global/images/family/my- + image-family If the source image is deleted + later, this field will not be set. + + This field is a member of `oneof`_ ``_source_image``. + source_image_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source image. Required if the source image is + protected by a customer-supplied encryption key. 
+ Instance templates do not store customer- + supplied encryption keys, so you cannot create + disks for instances in a managed instance group + if the source images are encrypted with your own + keys. + + This field is a member of `oneof`_ ``_source_image_encryption_key``. + source_snapshot (str): + The source snapshot to create this disk. When + creating a new instance, one of + initializeParams.sourceSnapshot or + initializeParams.sourceImage or disks.source is + required except for local SSD. To create a disk + with a snapshot that you created, specify the + snapshot name in the following format: + global/snapshots/my-backup If the source + snapshot is deleted later, this field will not + be set. + + This field is a member of `oneof`_ ``_source_snapshot``. + source_snapshot_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source snapshot. + + This field is a member of `oneof`_ ``_source_snapshot_encryption_key``. + """ + class OnUpdateAction(proto.Enum): + r"""Specifies which action to take on instance update with this + disk. Default is to use the existing disk. 
+ """ + UNDEFINED_ON_UPDATE_ACTION = 0 + RECREATE_DISK = 494767853 + RECREATE_DISK_IF_SOURCE_CHANGED = 398099712 + USE_EXISTING_DISK = 232682233 + + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disk_name = proto.Field( + proto.STRING, + number=92807149, + optional=True, + ) + disk_size_gb = proto.Field( + proto.INT64, + number=316263735, + optional=True, + ) + disk_type = proto.Field( + proto.STRING, + number=93009052, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + on_update_action = proto.Field( + proto.STRING, + number=202451980, + optional=True, + ) + provisioned_iops = proto.Field( + proto.INT64, + number=186769108, + optional=True, + ) + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + source_image = proto.Field( + proto.STRING, + number=50443319, + optional=True, + ) + source_image_encryption_key = proto.Field( + proto.MESSAGE, + number=381503659, + optional=True, + message='CustomerEncryptionKey', + ) + source_snapshot = proto.Field( + proto.STRING, + number=126061928, + optional=True, + ) + source_snapshot_encryption_key = proto.Field( + proto.MESSAGE, + number=303679322, + optional=True, + message='CustomerEncryptionKey', + ) + + +class AuditConfig(proto.Message): + r"""Specifies the audit configuration for a service. The configuration + determines which permission types are logged, and what identities, + if any, are exempted from logging. An AuditConfig must have one or + more AuditLogConfigs. If there are AuditConfigs for both + ``allServices`` and a specific service, the union of the two + AuditConfigs is used for that service: the log_types specified in + each AuditConfig are enabled, and the exempted_members in each + AuditLogConfig are exempted. 
Example Policy with multiple + AuditConfigs: { "audit_configs": [ { "service": "allServices", + "audit_log_configs": [ { "log_type": "DATA_READ", + "exempted_members": [ "user:jose@example.com" ] }, { "log_type": + "DATA_WRITE" }, { "log_type": "ADMIN_READ" } ] }, { "service": + "sampleservice.googleapis.com", "audit_log_configs": [ { "log_type": + "DATA_READ" }, { "log_type": "DATA_WRITE", "exempted_members": [ + "user:aliya@example.com" ] } ] } ] } For sampleservice, this policy + enables DATA_READ, DATA_WRITE and ADMIN_READ logging. It also + exempts jose@example.com from DATA_READ logging, and + aliya@example.com from DATA_WRITE logging. + + Attributes: + audit_log_configs (Sequence[google.cloud.compute_v1.types.AuditLogConfig]): + The configuration for logging of each type of + permission. + exempted_members (Sequence[str]): + This is deprecated and has no effect. Do not + use. + service (str): + Specifies a service that will be enabled for audit logging. + For example, ``storage.googleapis.com``, + ``cloudsql.googleapis.com``. ``allServices`` is a special + value that covers all services. + + This field is a member of `oneof`_ ``_service``. + """ + + audit_log_configs = proto.RepeatedField( + proto.MESSAGE, + number=488420626, + message='AuditLogConfig', + ) + exempted_members = proto.RepeatedField( + proto.STRING, + number=232615576, + ) + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + + +class AuditLogConfig(proto.Message): + r"""Provides the configuration for logging a type of permissions. + Example: { "audit_log_configs": [ { "log_type": "DATA_READ", + "exempted_members": [ "user:jose@example.com" ] }, { "log_type": + "DATA_WRITE" } ] } This enables 'DATA_READ' and 'DATA_WRITE' + logging, while exempting jose@example.com from DATA_READ logging. + + Attributes: + exempted_members (Sequence[str]): + Specifies the identities that do not cause + logging for this type of permission. 
Follows the + same format of Binding.members. + ignore_child_exemptions (bool): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_ignore_child_exemptions``. + log_type (str): + The log type that this config enables. + Check the LogType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_log_type``. + """ + class LogType(proto.Enum): + r"""The log type that this config enables.""" + UNDEFINED_LOG_TYPE = 0 + ADMIN_READ = 128951462 + DATA_READ = 305224971 + DATA_WRITE = 340181738 + LOG_TYPE_UNSPECIFIED = 154527053 + + exempted_members = proto.RepeatedField( + proto.STRING, + number=232615576, + ) + ignore_child_exemptions = proto.Field( + proto.BOOL, + number=70141850, + optional=True, + ) + log_type = proto.Field( + proto.STRING, + number=403115861, + optional=True, + ) + + +class AuthorizationLoggingOptions(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + permission_type (str): + This is deprecated and has no effect. Do not + use. Check the PermissionType enum for the list + of possible values. + + This field is a member of `oneof`_ ``_permission_type``. + """ + class PermissionType(proto.Enum): + r"""This is deprecated and has no effect. Do not use.""" + UNDEFINED_PERMISSION_TYPE = 0 + ADMIN_READ = 128951462 + ADMIN_WRITE = 244412079 + DATA_READ = 305224971 + DATA_WRITE = 340181738 + PERMISSION_TYPE_UNSPECIFIED = 440313346 + + permission_type = proto.Field( + proto.STRING, + number=525978538, + optional=True, + ) + + +class Autoscaler(proto.Message): + r"""Represents an Autoscaler resource. Google Compute Engine has two + Autoscaler resources: \* + `Zonal `__ \* + `Regional `__ Use + autoscalers to automatically add or delete instances from a managed + instance group according to your defined autoscaling policy. For + more information, read Autoscaling Groups of Instances. For zonal + managed instance groups resource, use the autoscaler resource. 
For + regional managed instance groups, use the regionAutoscalers + resource. + + Attributes: + autoscaling_policy (google.cloud.compute_v1.types.AutoscalingPolicy): + The configuration parameters for the + autoscaling algorithm. You can define one or + more signals for an autoscaler: cpuUtilization, + customMetricUtilizations, and + loadBalancingUtilization. If none of these are + specified, the default will be to autoscale + based on cpuUtilization to 0.6 or 60%. + + This field is a member of `oneof`_ ``_autoscaling_policy``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#autoscaler for autoscalers. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + recommended_size (int): + [Output Only] Target recommended MIG size (number of + instances) computed by autoscaler. Autoscaler calculates the + recommended MIG size even when the autoscaling policy mode + is different from ON. 
This field is empty when autoscaler is + not connected to an existing managed instance group or + autoscaler did not generate its prediction. + + This field is a member of `oneof`_ ``_recommended_size``. + region (str): + [Output Only] URL of the region where the instance group + resides (for autoscalers living in regional scope). + + This field is a member of `oneof`_ ``_region``. + scaling_schedule_status (Sequence[google.cloud.compute_v1.types.Autoscaler.ScalingScheduleStatusEntry]): + [Output Only] Status information of existing scaling + schedules. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + status (str): + [Output Only] The status of the autoscaler configuration. + Current set of possible values: - PENDING: Autoscaler + backend hasn't read new/updated configuration. - DELETING: + Configuration is being deleted. - ACTIVE: Configuration is + acknowledged to be effective. Some warnings might be present + in the statusDetails field. - ERROR: Configuration has + errors. Actionable for users. Details are present in the + statusDetails field. New values might be added in the + future. Check the Status enum for the list of possible + values. + + This field is a member of `oneof`_ ``_status``. + status_details (Sequence[google.cloud.compute_v1.types.AutoscalerStatusDetails]): + [Output Only] Human-readable details about the current state + of the autoscaler. Read the documentation for Commonly + returned status messages for examples of status messages you + might encounter. + target (str): + URL of the managed instance group that this + autoscaler will scale. This field is required + when creating an autoscaler. + + This field is a member of `oneof`_ ``_target``. + zone (str): + [Output Only] URL of the zone where the instance group + resides (for autoscalers living in zonal scope). + + This field is a member of `oneof`_ ``_zone``. 
+ """ + class Status(proto.Enum): + r"""[Output Only] The status of the autoscaler configuration. Current + set of possible values: - PENDING: Autoscaler backend hasn't read + new/updated configuration. - DELETING: Configuration is being + deleted. - ACTIVE: Configuration is acknowledged to be effective. + Some warnings might be present in the statusDetails field. - ERROR: + Configuration has errors. Actionable for users. Details are present + in the statusDetails field. New values might be added in the future. + """ + UNDEFINED_STATUS = 0 + ACTIVE = 314733318 + DELETING = 528602024 + ERROR = 66247144 + PENDING = 35394935 + + autoscaling_policy = proto.Field( + proto.MESSAGE, + number=221950041, + optional=True, + message='AutoscalingPolicy', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + recommended_size = proto.Field( + proto.INT32, + number=257915749, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + scaling_schedule_status = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=465950178, + message='ScalingScheduleStatus', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + status_details = proto.RepeatedField( + proto.MESSAGE, + number=363353845, + message='AutoscalerStatusDetails', + ) + target = proto.Field( + proto.STRING, + number=192835985, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class AutoscalerAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + 
[Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.AutoscalerAggregatedList.ItemsEntry]): + A list of AutoscalersScopedList resources. + kind (str): + [Output Only] Type of resource. Always + compute#autoscalerAggregatedList for aggregated lists of + autoscalers. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. end_interface: + MixerListResponseWithEtagBuilder + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='AutoscalersScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class AutoscalerList(proto.Message): + r"""Contains a list of Autoscaler resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Autoscaler]): + A list of Autoscaler resources. + kind (str): + [Output Only] Type of resource. Always + compute#autoscalerList for lists of autoscalers. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Autoscaler', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class AutoscalerStatusDetails(proto.Message): + r""" + + Attributes: + message (str): + The status message. + + This field is a member of `oneof`_ ``_message``. + type_ (str): + The type of error, warning, or notice returned. Current set + of possible values: - ALL_INSTANCES_UNHEALTHY (WARNING): All + instances in the instance group are unhealthy (not in + RUNNING state). - BACKEND_SERVICE_DOES_NOT_EXIST (ERROR): + There is no backend service attached to the instance group. + - CAPPED_AT_MAX_NUM_REPLICAS (WARNING): Autoscaler + recommends a size greater than maxNumReplicas. - + CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE (WARNING): The custom + metric samples are not exported often enough to be a + credible base for autoscaling. - CUSTOM_METRIC_INVALID + (ERROR): The custom metric that was specified does not exist + or does not have the necessary labels. - MIN_EQUALS_MAX + (WARNING): The minNumReplicas is equal to maxNumReplicas. + This means the autoscaler cannot add or remove instances + from the instance group. - MISSING_CUSTOM_METRIC_DATA_POINTS + (WARNING): The autoscaler did not receive any data from the + custom metric configured for autoscaling. - + MISSING_LOAD_BALANCING_DATA_POINTS (WARNING): The autoscaler + is configured to scale based on a load balancing signal but + the instance group has not received any requests from the + load balancer. 
- MODE_OFF (WARNING): Autoscaling is turned + off. The number of instances in the group won't change + automatically. The autoscaling configuration is preserved. - + MODE_ONLY_UP (WARNING): Autoscaling is in the "Autoscale + only out" mode. The autoscaler can add instances but not + remove any. - MORE_THAN_ONE_BACKEND_SERVICE (ERROR): The + instance group cannot be autoscaled because it has more than + one backend service attached to it. - + NOT_ENOUGH_QUOTA_AVAILABLE (ERROR): There is insufficient + quota for the necessary resources, such as CPU or number of + instances. - REGION_RESOURCE_STOCKOUT (ERROR): Shown only + for regional autoscalers: there is a resource stockout in + the chosen region. - SCALING_TARGET_DOES_NOT_EXIST (ERROR): + The target to be scaled does not exist. - + UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION (ERROR): + Autoscaling does not work with an HTTP/S load balancer that + has been configured for maxRate. - ZONE_RESOURCE_STOCKOUT + (ERROR): For zonal autoscalers: there is a resource stockout + in the chosen zone. For regional autoscalers: in at least + one of the zones you're using there is a resource stockout. + New values might be added in the future. Some of the values + might not be available in all API versions. Check the Type + enum for the list of possible values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""The type of error, warning, or notice returned. Current set of + possible values: - ALL_INSTANCES_UNHEALTHY (WARNING): All instances + in the instance group are unhealthy (not in RUNNING state). - + BACKEND_SERVICE_DOES_NOT_EXIST (ERROR): There is no backend service + attached to the instance group. - CAPPED_AT_MAX_NUM_REPLICAS + (WARNING): Autoscaler recommends a size greater than maxNumReplicas. + - CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE (WARNING): The custom metric + samples are not exported often enough to be a credible base for + autoscaling. 
- CUSTOM_METRIC_INVALID (ERROR): The custom metric that + was specified does not exist or does not have the necessary labels. + - MIN_EQUALS_MAX (WARNING): The minNumReplicas is equal to + maxNumReplicas. This means the autoscaler cannot add or remove + instances from the instance group. - + MISSING_CUSTOM_METRIC_DATA_POINTS (WARNING): The autoscaler did not + receive any data from the custom metric configured for autoscaling. + - MISSING_LOAD_BALANCING_DATA_POINTS (WARNING): The autoscaler is + configured to scale based on a load balancing signal but the + instance group has not received any requests from the load balancer. + - MODE_OFF (WARNING): Autoscaling is turned off. The number of + instances in the group won't change automatically. The autoscaling + configuration is preserved. - MODE_ONLY_UP (WARNING): Autoscaling is + in the "Autoscale only out" mode. The autoscaler can add instances + but not remove any. - MORE_THAN_ONE_BACKEND_SERVICE (ERROR): The + instance group cannot be autoscaled because it has more than one + backend service attached to it. - NOT_ENOUGH_QUOTA_AVAILABLE + (ERROR): There is insufficient quota for the necessary resources, + such as CPU or number of instances. - REGION_RESOURCE_STOCKOUT + (ERROR): Shown only for regional autoscalers: there is a resource + stockout in the chosen region. - SCALING_TARGET_DOES_NOT_EXIST + (ERROR): The target to be scaled does not exist. - + UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION (ERROR): + Autoscaling does not work with an HTTP/S load balancer that has been + configured for maxRate. - ZONE_RESOURCE_STOCKOUT (ERROR): For zonal + autoscalers: there is a resource stockout in the chosen zone. For + regional autoscalers: in at least one of the zones you're using + there is a resource stockout. New values might be added in the + future. Some of the values might not be available in all API + versions. 
+ """ + UNDEFINED_TYPE = 0 + ALL_INSTANCES_UNHEALTHY = 404965477 + BACKEND_SERVICE_DOES_NOT_EXIST = 191417626 + CAPPED_AT_MAX_NUM_REPLICAS = 518617 + CUSTOM_METRIC_DATA_POINTS_TOO_SPARSE = 328964659 + CUSTOM_METRIC_INVALID = 204430550 + MIN_EQUALS_MAX = 2821361 + MISSING_CUSTOM_METRIC_DATA_POINTS = 94885086 + MISSING_LOAD_BALANCING_DATA_POINTS = 509858898 + MODE_OFF = 164169907 + MODE_ONLY_SCALE_OUT = 3840994 + MODE_ONLY_UP = 100969842 + MORE_THAN_ONE_BACKEND_SERVICE = 151922141 + NOT_ENOUGH_QUOTA_AVAILABLE = 403101631 + REGION_RESOURCE_STOCKOUT = 528622846 + SCALING_TARGET_DOES_NOT_EXIST = 122636699 + SCHEDULED_INSTANCES_GREATER_THAN_AUTOSCALER_MAX = 29275586 + SCHEDULED_INSTANCES_LESS_THAN_AUTOSCALER_MIN = 398287669 + UNKNOWN = 433141802 + UNSUPPORTED_MAX_RATE_LOAD_BALANCING_CONFIGURATION = 330845009 + ZONE_RESOURCE_STOCKOUT = 210200502 + + message = proto.Field( + proto.STRING, + number=418054151, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class AutoscalersScopedList(proto.Message): + r""" + + Attributes: + autoscalers (Sequence[google.cloud.compute_v1.types.Autoscaler]): + [Output Only] A list of autoscalers contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of autoscalers when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + autoscalers = proto.RepeatedField( + proto.MESSAGE, + number=465771644, + message='Autoscaler', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class AutoscalingPolicy(proto.Message): + r"""Cloud Autoscaler policy. + + Attributes: + cool_down_period_sec (int): + The number of seconds that the autoscaler + waits before it starts collecting information + from a new instance. 
This prevents the + autoscaler from collecting information when the + instance is initializing, during which the + collected usage would not be reliable. The + default time autoscaler waits is 60 seconds. + Virtual machine initialization times might vary + because of numerous factors. We recommend that + you test how long an instance may take to + initialize. To do this, create an instance and + time the startup process. + + This field is a member of `oneof`_ ``_cool_down_period_sec``. + cpu_utilization (google.cloud.compute_v1.types.AutoscalingPolicyCpuUtilization): + Defines the CPU utilization policy that + allows the autoscaler to scale based on the + average CPU utilization of a managed instance + group. + + This field is a member of `oneof`_ ``_cpu_utilization``. + custom_metric_utilizations (Sequence[google.cloud.compute_v1.types.AutoscalingPolicyCustomMetricUtilization]): + Configuration parameters of autoscaling based + on a custom metric. + load_balancing_utilization (google.cloud.compute_v1.types.AutoscalingPolicyLoadBalancingUtilization): + Configuration parameters of autoscaling based + on load balancer. + + This field is a member of `oneof`_ ``_load_balancing_utilization``. + max_num_replicas (int): + The maximum number of instances that the + autoscaler can scale out to. This is required + when creating or updating an autoscaler. The + maximum number of replicas must not be lower + than minimal number of replicas. + + This field is a member of `oneof`_ ``_max_num_replicas``. + min_num_replicas (int): + The minimum number of replicas that the + autoscaler can scale in to. This cannot be less + than 0. If not provided, autoscaler chooses a + default value depending on maximum number of + instances allowed. + + This field is a member of `oneof`_ ``_min_num_replicas``. + mode (str): + Defines operating mode for this policy. + Check the Mode enum for the list of possible + values. + + This field is a member of `oneof`_ ``_mode``. 
+ scale_in_control (google.cloud.compute_v1.types.AutoscalingPolicyScaleInControl): + + This field is a member of `oneof`_ ``_scale_in_control``. + scaling_schedules (Sequence[google.cloud.compute_v1.types.AutoscalingPolicy.ScalingSchedulesEntry]): + Scaling schedules defined for an autoscaler. Multiple + schedules can be set on an autoscaler, and they can overlap. + During overlapping periods the greatest + min_required_replicas of all scaling schedules is applied. + Up to 128 scaling schedules are allowed. + """ + class Mode(proto.Enum): + r"""Defines operating mode for this policy.""" + UNDEFINED_MODE = 0 + OFF = 78159 + ON = 2527 + ONLY_SCALE_OUT = 152713670 + ONLY_UP = 478095374 + + cool_down_period_sec = proto.Field( + proto.INT32, + number=107692954, + optional=True, + ) + cpu_utilization = proto.Field( + proto.MESSAGE, + number=381211147, + optional=True, + message='AutoscalingPolicyCpuUtilization', + ) + custom_metric_utilizations = proto.RepeatedField( + proto.MESSAGE, + number=131972850, + message='AutoscalingPolicyCustomMetricUtilization', + ) + load_balancing_utilization = proto.Field( + proto.MESSAGE, + number=429746403, + optional=True, + message='AutoscalingPolicyLoadBalancingUtilization', + ) + max_num_replicas = proto.Field( + proto.INT32, + number=62327375, + optional=True, + ) + min_num_replicas = proto.Field( + proto.INT32, + number=535329825, + optional=True, + ) + mode = proto.Field( + proto.STRING, + number=3357091, + optional=True, + ) + scale_in_control = proto.Field( + proto.MESSAGE, + number=527670872, + optional=True, + message='AutoscalingPolicyScaleInControl', + ) + scaling_schedules = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=355416580, + message='AutoscalingPolicyScalingSchedule', + ) + + +class AutoscalingPolicyCpuUtilization(proto.Message): + r"""CPU utilization policy. + + Attributes: + predictive_method (str): + Indicates whether predictive autoscaling based on CPU metric + is enabled. 
Valid values are: \* NONE (default). No + predictive method is used. The autoscaler scales the group + to meet current demand based on real-time metrics. \* + OPTIMIZE_AVAILABILITY. Predictive autoscaling improves + availability by monitoring daily and weekly load patterns + and scaling out ahead of anticipated demand. Check the + PredictiveMethod enum for the list of possible values. + + This field is a member of `oneof`_ ``_predictive_method``. + utilization_target (float): + The target CPU utilization that the autoscaler maintains. + Must be a float value in the range (0, 1]. If not specified, + the default is 0.6. If the CPU level is below the target + utilization, the autoscaler scales in the number of + instances until it reaches the minimum number of instances + you specified or until the average CPU of your instances + reaches the target utilization. If the average CPU is above + the target utilization, the autoscaler scales out until it + reaches the maximum number of instances you specified or + until the average utilization reaches the target + utilization. + + This field is a member of `oneof`_ ``_utilization_target``. + """ + class PredictiveMethod(proto.Enum): + r"""Indicates whether predictive autoscaling based on CPU metric is + enabled. Valid values are: \* NONE (default). No predictive method + is used. The autoscaler scales the group to meet current demand + based on real-time metrics. \* OPTIMIZE_AVAILABILITY. Predictive + autoscaling improves availability by monitoring daily and weekly + load patterns and scaling out ahead of anticipated demand. + """ + UNDEFINED_PREDICTIVE_METHOD = 0 + NONE = 2402104 + OPTIMIZE_AVAILABILITY = 11629437 + + predictive_method = proto.Field( + proto.STRING, + number=390220737, + optional=True, + ) + utilization_target = proto.Field( + proto.DOUBLE, + number=215905870, + optional=True, + ) + + +class AutoscalingPolicyCustomMetricUtilization(proto.Message): + r"""Custom utilization metric policy. 
+ + Attributes: + filter (str): + A filter string, compatible with a Stackdriver Monitoring + filter string for TimeSeries.list API call. This filter is + used to select a specific TimeSeries for the purpose of + autoscaling and to determine whether the metric is exporting + per-instance or per-group data. For the filter to be valid + for autoscaling purposes, the following rules apply: - You + can only use the AND operator for joining selectors. - You + can only use direct equality comparison operator (=) without + any functions for each selector. - You can specify the + metric in both the filter string and in the metric field. + However, if specified in both places, the metric must be + identical. - The monitored resource type determines what + kind of values are expected for the metric. If it is a + gce_instance, the autoscaler expects the metric to include a + separate TimeSeries for each instance in a group. In such a + case, you cannot filter on resource labels. If the resource + type is any other value, the autoscaler expects this metric + to contain values that apply to the entire autoscaled + instance group and resource label filtering can be performed + to point autoscaler at the correct TimeSeries to scale upon. + This is called a *per-group metric* for the purpose of + autoscaling. If not specified, the type defaults to + gce_instance. Try to provide a filter that is selective + enough to pick just one TimeSeries for the autoscaled group + or for each of the instances (if you are using gce_instance + resource type). If multiple TimeSeries are returned upon the + query execution, the autoscaler will sum their respective + values to obtain its scaling value. + + This field is a member of `oneof`_ ``_filter``. + metric (str): + The identifier (type) of the Stackdriver + Monitoring metric. The metric cannot have + negative values. The metric must have a value + type of INT64 or DOUBLE. + + This field is a member of `oneof`_ ``_metric``. 
+ single_instance_assignment (float): + If scaling is based on a per-group metric value that + represents the total amount of work to be done or resource + usage, set this value to an amount assigned for a single + instance of the scaled group. Autoscaler keeps the number of + instances proportional to the value of this metric. The + metric itself does not change value due to group resizing. A + good metric to use with the target is for example + pubsub.googleapis.com/subscription/num_undelivered_messages + or a custom metric exporting the total number of requests + coming to your instances. A bad example would be a metric + exporting an average or median latency, since this value + can't include a chunk assignable to a single instance, it + could be better used with utilization_target instead. + + This field is a member of `oneof`_ ``_single_instance_assignment``. + utilization_target (float): + The target value of the metric that autoscaler maintains. + This must be a positive value. A utilization metric scales + number of virtual machines handling requests to increase or + decrease proportionally to the metric. For example, a good + metric to use as a utilization_target is + https://www.googleapis.com/compute/v1/instance/network/received_bytes_count. + The autoscaler works to keep this value constant for each of + the instances. + + This field is a member of `oneof`_ ``_utilization_target``. + utilization_target_type (str): + Defines how target utilization value is expressed for a + Stackdriver Monitoring metric. Either GAUGE, + DELTA_PER_SECOND, or DELTA_PER_MINUTE. Check the + UtilizationTargetType enum for the list of possible values. + + This field is a member of `oneof`_ ``_utilization_target_type``. + """ + class UtilizationTargetType(proto.Enum): + r"""Defines how target utilization value is expressed for a Stackdriver + Monitoring metric. Either GAUGE, DELTA_PER_SECOND, or + DELTA_PER_MINUTE. 
+ """ + UNDEFINED_UTILIZATION_TARGET_TYPE = 0 + DELTA_PER_MINUTE = 87432861 + DELTA_PER_SECOND = 255180029 + GAUGE = 67590361 + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + metric = proto.Field( + proto.STRING, + number=533067184, + optional=True, + ) + single_instance_assignment = proto.Field( + proto.DOUBLE, + number=504768064, + optional=True, + ) + utilization_target = proto.Field( + proto.DOUBLE, + number=215905870, + optional=True, + ) + utilization_target_type = proto.Field( + proto.STRING, + number=340169355, + optional=True, + ) + + +class AutoscalingPolicyLoadBalancingUtilization(proto.Message): + r"""Configuration parameters of autoscaling based on load + balancing. + + Attributes: + utilization_target (float): + Fraction of backend capacity utilization (set + in HTTP(S) load balancing configuration) that + the autoscaler maintains. Must be a positive + float value. If not defined, the default is 0.8. + + This field is a member of `oneof`_ ``_utilization_target``. + """ + + utilization_target = proto.Field( + proto.DOUBLE, + number=215905870, + optional=True, + ) + + +class AutoscalingPolicyScaleInControl(proto.Message): + r"""Configuration that allows for slower scale in so that even if + Autoscaler recommends an abrupt scale in of a MIG, it will be + throttled as specified by the parameters below. + + Attributes: + max_scaled_in_replicas (google.cloud.compute_v1.types.FixedOrPercent): + Maximum allowed number (or %) of VMs that can + be deducted from the peak recommendation during + the window autoscaler looks at when computing + recommendations. Possibly all these VMs can be + deleted at once so user service needs to be + prepared to lose that many VMs in one step. + + This field is a member of `oneof`_ ``_max_scaled_in_replicas``. + time_window_sec (int): + How far back autoscaling looks when computing + recommendations to include directives regarding + slower scale in, as described above. 
+ + This field is a member of `oneof`_ ``_time_window_sec``. + """ + + max_scaled_in_replicas = proto.Field( + proto.MESSAGE, + number=180710123, + optional=True, + message='FixedOrPercent', + ) + time_window_sec = proto.Field( + proto.INT32, + number=36405300, + optional=True, + ) + + +class AutoscalingPolicyScalingSchedule(proto.Message): + r"""Scaling based on user-defined schedule. The message describes + a single scaling schedule. A scaling schedule changes the + minimum number of VM instances an autoscaler can recommend, + which can trigger scaling out. + + Attributes: + description (str): + A description of a scaling schedule. + + This field is a member of `oneof`_ ``_description``. + disabled (bool): + A boolean value that specifies whether a + scaling schedule can influence autoscaler + recommendations. If set to true, then a scaling + schedule has no effect. This field is optional, + and its value is false by default. + + This field is a member of `oneof`_ ``_disabled``. + duration_sec (int): + The duration of time intervals, in seconds, + for which this scaling schedule is to run. The + minimum allowed value is 300. This field is + required. + + This field is a member of `oneof`_ ``_duration_sec``. + min_required_replicas (int): + The minimum number of VM instances that the + autoscaler will recommend in time intervals + starting according to schedule. This field is + required. + + This field is a member of `oneof`_ ``_min_required_replicas``. + schedule (str): + The start timestamps of time intervals when this scaling + schedule is to provide a scaling signal. This field uses the + extended cron format (with an optional year field). The + expression can describe a single timestamp if the optional + year is set, in which case the scaling schedule runs once. + The schedule is interpreted with respect to time_zone. This + field is required. Note: These timestamps only describe when + autoscaler starts providing the scaling signal. 
The VMs need + additional time to become serving. + + This field is a member of `oneof`_ ``_schedule``. + time_zone (str): + The time zone to use when interpreting the schedule. The + value of this field must be a time zone name from the tz + database: http://en.wikipedia.org/wiki/Tz_database. This + field is assigned a default value of ���UTC��� if left + empty. + + This field is a member of `oneof`_ ``_time_zone``. + """ + + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disabled = proto.Field( + proto.BOOL, + number=270940796, + optional=True, + ) + duration_sec = proto.Field( + proto.INT32, + number=212356902, + optional=True, + ) + min_required_replicas = proto.Field( + proto.INT32, + number=365514414, + optional=True, + ) + schedule = proto.Field( + proto.STRING, + number=375820951, + optional=True, + ) + time_zone = proto.Field( + proto.STRING, + number=36848094, + optional=True, + ) + + +class Backend(proto.Message): + r"""Message containing information of one individual backend. + + Attributes: + balancing_mode (str): + Specifies how to determine whether the + backend of a load balancer can handle additional + traffic or is fully loaded. For usage + guidelines, see Connection balancing mode. + Backends must use compatible balancing modes. + For more information, see Supported balancing + modes and target capacity settings and + Restrictions and guidance for instance groups. + Note: Currently, if you use the API to configure + incompatible balancing modes, the configuration + might be accepted even though it has no impact + and is ignored. Specifically, + Backend.maxUtilization is ignored when + Backend.balancingMode is RATE. In the future, + this incompatible combination will be rejected. + Check the BalancingMode enum for the list of + possible values. + + This field is a member of `oneof`_ ``_balancing_mode``. + capacity_scaler (float): + A multiplier applied to the backend's target capacity of its + balancing mode. 
The default value is 1, which means the + group serves up to 100% of its configured capacity + (depending on balancingMode). A setting of 0 means the group + is completely drained, offering 0% of its available + capacity. The valid ranges are 0.0 and [0.1,1.0]. You cannot + configure a setting larger than 0 and smaller than 0.1. You + cannot configure a setting of 0 when there is only one + backend attached to the backend service. + + This field is a member of `oneof`_ ``_capacity_scaler``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + failover (bool): + This field designates whether this is a + failover backend. More than one failover backend + can be configured for a given BackendService. + + This field is a member of `oneof`_ ``_failover``. + group (str): + The fully-qualified URL of an instance group or network + endpoint group (NEG) resource. To determine what types of + backends a load balancer supports, see the `Backend services + overview `__. + You must use the *fully-qualified* URL (starting with + https://www.googleapis.com/) to specify the instance group + or NEG. Partial URLs are not supported. + + This field is a member of `oneof`_ ``_group``. + max_connections (int): + Defines a target maximum number of + simultaneous connections. For usage guidelines, + see Connection balancing mode and Utilization + balancing mode. Not available if the backend's + balancingMode is RATE. + + This field is a member of `oneof`_ ``_max_connections``. + max_connections_per_endpoint (int): + Defines a target maximum number of + simultaneous connections. For usage guidelines, + see Connection balancing mode and Utilization + balancing mode. Not available if the backend's + balancingMode is RATE. + + This field is a member of `oneof`_ ``_max_connections_per_endpoint``. 
+ max_connections_per_instance (int): + Defines a target maximum number of + simultaneous connections. For usage guidelines, + see Connection balancing mode and Utilization + balancing mode. Not available if the backend's + balancingMode is RATE. + + This field is a member of `oneof`_ ``_max_connections_per_instance``. + max_rate (int): + Defines a maximum number of HTTP requests per + second (RPS). For usage guidelines, see Rate + balancing mode and Utilization balancing mode. + Not available if the backend's balancingMode is + CONNECTION. + + This field is a member of `oneof`_ ``_max_rate``. + max_rate_per_endpoint (float): + Defines a maximum target for requests per + second (RPS). For usage guidelines, see Rate + balancing mode and Utilization balancing mode. + Not available if the backend's balancingMode is + CONNECTION. + + This field is a member of `oneof`_ ``_max_rate_per_endpoint``. + max_rate_per_instance (float): + Defines a maximum target for requests per + second (RPS). For usage guidelines, see Rate + balancing mode and Utilization balancing mode. + Not available if the backend's balancingMode is + CONNECTION. + + This field is a member of `oneof`_ ``_max_rate_per_instance``. + max_utilization (float): + Optional parameter to define a target capacity for the + UTILIZATIONbalancing mode. The valid range is [0.0, 1.0]. + For usage guidelines, see Utilization balancing mode. + + This field is a member of `oneof`_ ``_max_utilization``. + """ + class BalancingMode(proto.Enum): + r"""Specifies how to determine whether the backend of a load + balancer can handle additional traffic or is fully loaded. For + usage guidelines, see Connection balancing mode. Backends must + use compatible balancing modes. For more information, see + Supported balancing modes and target capacity settings and + Restrictions and guidance for instance groups. 
Note: Currently, + if you use the API to configure incompatible balancing modes, + the configuration might be accepted even though it has no impact + and is ignored. Specifically, Backend.maxUtilization is ignored + when Backend.balancingMode is RATE. In the future, this + incompatible combination will be rejected. + """ + UNDEFINED_BALANCING_MODE = 0 + CONNECTION = 246311646 + RATE = 2508000 + UTILIZATION = 157008386 + + balancing_mode = proto.Field( + proto.STRING, + number=430286217, + optional=True, + ) + capacity_scaler = proto.Field( + proto.FLOAT, + number=315958157, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + failover = proto.Field( + proto.BOOL, + number=138892530, + optional=True, + ) + group = proto.Field( + proto.STRING, + number=98629247, + optional=True, + ) + max_connections = proto.Field( + proto.INT32, + number=110652154, + optional=True, + ) + max_connections_per_endpoint = proto.Field( + proto.INT32, + number=216904604, + optional=True, + ) + max_connections_per_instance = proto.Field( + proto.INT32, + number=104671900, + optional=True, + ) + max_rate = proto.Field( + proto.INT32, + number=408035035, + optional=True, + ) + max_rate_per_endpoint = proto.Field( + proto.FLOAT, + number=129832283, + optional=True, + ) + max_rate_per_instance = proto.Field( + proto.FLOAT, + number=17599579, + optional=True, + ) + max_utilization = proto.Field( + proto.FLOAT, + number=148192199, + optional=True, + ) + + +class BackendBucket(proto.Message): + r"""Represents a Cloud Storage Bucket resource. This Cloud + Storage bucket resource is referenced by a URL map of a load + balancer. For more information, read Backend Buckets. + + Attributes: + bucket_name (str): + Cloud Storage bucket name. + + This field is a member of `oneof`_ ``_bucket_name``. + cdn_policy (google.cloud.compute_v1.types.BackendBucketCdnPolicy): + Cloud CDN configuration for this + BackendBucket. 
+ + This field is a member of `oneof`_ ``_cdn_policy``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + custom_response_headers (Sequence[str]): + Headers that the HTTP/S load balancer should + add to proxied responses. + description (str): + An optional textual description of the + resource; provided by the client when the + resource is created. + + This field is a member of `oneof`_ ``_description``. + enable_cdn (bool): + If true, enable Cloud CDN for this + BackendBucket. + + This field is a member of `oneof`_ ``_enable_cdn``. + id (int): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + Type of the resource. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ """ + + bucket_name = proto.Field( + proto.STRING, + number=283610048, + optional=True, + ) + cdn_policy = proto.Field( + proto.MESSAGE, + number=213976452, + optional=True, + message='BackendBucketCdnPolicy', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + custom_response_headers = proto.RepeatedField( + proto.STRING, + number=387539094, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + enable_cdn = proto.Field( + proto.BOOL, + number=282942321, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + + +class BackendBucketCdnPolicy(proto.Message): + r"""Message containing Cloud CDN configuration for a backend + bucket. + + Attributes: + bypass_cache_on_request_headers (Sequence[google.cloud.compute_v1.types.BackendBucketCdnPolicyBypassCacheOnRequestHeader]): + Bypass the cache when the specified request + headers are matched - e.g. Pragma or + Authorization headers. Up to 5 headers can be + specified. The cache is bypassed for all + cdnPolicy.cacheMode settings. + cache_mode (str): + Specifies the cache setting for all responses from this + backend. The possible values are: USE_ORIGIN_HEADERS + Requires the origin to set valid caching headers to cache + content. Responses without these headers will not be cached + at Google's edge, and will require a full trip to the origin + on every request, potentially impacting performance and + increasing load on the origin server. FORCE_CACHE_ALL Cache + all content, ignoring any "private", "no-store" or + "no-cache" directives in Cache-Control response headers. + Warning: this may result in Cloud CDN caching private, + per-user (user identifiable) content. 
CACHE_ALL_STATIC + Automatically cache static content, including common image + formats, media (video and audio), and web assets (JavaScript + and CSS). Requests and responses that are marked as + uncacheable, as well as dynamic content (including HTML), + will not be cached. Check the CacheMode enum for the list of + possible values. + + This field is a member of `oneof`_ ``_cache_mode``. + client_ttl (int): + Specifies a separate client (e.g. browser client) maximum + TTL. This is used to clamp the max-age (or Expires) value + sent to the client. With FORCE_CACHE_ALL, the lesser of + client_ttl and default_ttl is used for the response max-age + directive, along with a "public" directive. For cacheable + content in CACHE_ALL_STATIC mode, client_ttl clamps the + max-age from the origin (if specified), or else sets the + response max-age directive to the lesser of the client_ttl + and default_ttl, and also ensures a "public" cache-control + directive is present. If a client TTL is not specified, a + default value (1 hour) will be used. The maximum allowed + value is 86400s (1 day). + + This field is a member of `oneof`_ ``_client_ttl``. + default_ttl (int): + Specifies the default TTL for cached content served by this + origin for responses that do not have an existing valid TTL + (max-age or s-max-age). Setting a TTL of "0" means "always + revalidate". The value of defaultTTL cannot be set to a + value greater than that of maxTTL, but can be equal. When + the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will + overwrite the TTL set in all responses. The maximum allowed + value is 31,622,400s (1 year), noting that infrequently + accessed objects may be evicted from the cache before the + defined TTL. + + This field is a member of `oneof`_ ``_default_ttl``. + max_ttl (int): + Specifies the maximum allowed TTL for cached + content served by this origin. 
Cache directives + that attempt to set a max-age or s-maxage higher + than this, or an Expires header more than maxTTL + seconds in the future will be capped at the + value of maxTTL, as if it were the value of an + s-maxage Cache-Control directive. Headers sent + to the client will not be modified. Setting a + TTL of "0" means "always revalidate". The + maximum allowed value is 31,622,400s (1 year), + noting that infrequently accessed objects may be + evicted from the cache before the defined TTL. + + This field is a member of `oneof`_ ``_max_ttl``. + negative_caching (bool): + Negative caching allows per-status code TTLs to be set, in + order to apply fine-grained caching for common errors or + redirects. This can reduce the load on your origin and + improve end-user experience by reducing response latency. + When the cache mode is set to CACHE_ALL_STATIC or + USE_ORIGIN_HEADERS, negative caching applies to responses + with the specified response code that lack any + Cache-Control, Expires, or Pragma: no-cache directives. When + the cache mode is set to FORCE_CACHE_ALL, negative caching + applies to all responses with the specified response code, + and override any caching headers. By default, Cloud CDN will + apply the following default TTLs to these status codes: HTTP + 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m + HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal + Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected + Request), 501 (Not Implemented): 60s. These defaults can be + overridden in negative_caching_policy. + + This field is a member of `oneof`_ ``_negative_caching``. + negative_caching_policy (Sequence[google.cloud.compute_v1.types.BackendBucketCdnPolicyNegativeCachingPolicy]): + Sets a cache TTL for the specified HTTP status code. + negative_caching must be enabled to configure + negative_caching_policy. Omitting the policy and leaving + negative_caching enabled will use Cloud CDN's default cache + TTLs. 
Note that when specifying an explicit + negative_caching_policy, you should take care to specify a + cache TTL for all response codes that you wish to cache. + Cloud CDN will not apply any default negative caching when a + policy exists. + request_coalescing (bool): + If true then Cloud CDN will combine multiple + concurrent cache fill requests into a small + number of requests to the origin. + + This field is a member of `oneof`_ ``_request_coalescing``. + serve_while_stale (int): + Serve existing content from the cache (if + available) when revalidating content with the + origin, or when an error is encountered when + refreshing the cache. This setting defines the + default "max-stale" duration for any cached + responses that do not specify a max-stale + directive. Stale responses that exceed the TTL + configured here will not be served. The default + limit (max-stale) is 86400s (1 day), which will + allow stale content to be served up to this + limit beyond the max-age (or s-max-age) of a + cached response. The maximum allowed value is + 604800 (1 week). Set this to zero (0) to disable + serve-while-stale. + + This field is a member of `oneof`_ ``_serve_while_stale``. + signed_url_cache_max_age_sec (int): + Maximum number of seconds the response to a signed URL + request will be considered fresh. After this time period, + the response will be revalidated before being served. + Defaults to 1hr (3600s). When serving responses to signed + URL requests, Cloud CDN will internally behave as though all + responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing + Cache-Control header. The actual headers served in responses + will not be altered. + + This field is a member of `oneof`_ ``_signed_url_cache_max_age_sec``. + signed_url_key_names (Sequence[str]): + [Output Only] Names of the keys for signing request URLs. 
+ """ + class CacheMode(proto.Enum): + r"""Specifies the cache setting for all responses from this backend. The + possible values are: USE_ORIGIN_HEADERS Requires the origin to set + valid caching headers to cache content. Responses without these + headers will not be cached at Google's edge, and will require a full + trip to the origin on every request, potentially impacting + performance and increasing load on the origin server. + FORCE_CACHE_ALL Cache all content, ignoring any "private", + "no-store" or "no-cache" directives in Cache-Control response + headers. Warning: this may result in Cloud CDN caching private, + per-user (user identifiable) content. CACHE_ALL_STATIC Automatically + cache static content, including common image formats, media (video + and audio), and web assets (JavaScript and CSS). Requests and + responses that are marked as uncacheable, as well as dynamic content + (including HTML), will not be cached. + """ + UNDEFINED_CACHE_MODE = 0 + CACHE_ALL_STATIC = 355027945 + FORCE_CACHE_ALL = 486026928 + INVALID_CACHE_MODE = 381295560 + USE_ORIGIN_HEADERS = 55380261 + + bypass_cache_on_request_headers = proto.RepeatedField( + proto.MESSAGE, + number=486203082, + message='BackendBucketCdnPolicyBypassCacheOnRequestHeader', + ) + cache_mode = proto.Field( + proto.STRING, + number=28877888, + optional=True, + ) + client_ttl = proto.Field( + proto.INT32, + number=29034360, + optional=True, + ) + default_ttl = proto.Field( + proto.INT32, + number=100253422, + optional=True, + ) + max_ttl = proto.Field( + proto.INT32, + number=307578001, + optional=True, + ) + negative_caching = proto.Field( + proto.BOOL, + number=336110005, + optional=True, + ) + negative_caching_policy = proto.RepeatedField( + proto.MESSAGE, + number=155359996, + message='BackendBucketCdnPolicyNegativeCachingPolicy', + ) + request_coalescing = proto.Field( + proto.BOOL, + number=532808276, + optional=True, + ) + serve_while_stale = proto.Field( + proto.INT32, + number=236682203, + 
optional=True, + ) + signed_url_cache_max_age_sec = proto.Field( + proto.INT64, + number=269374534, + optional=True, + ) + signed_url_key_names = proto.RepeatedField( + proto.STRING, + number=371848885, + ) + + +class BackendBucketCdnPolicyBypassCacheOnRequestHeader(proto.Message): + r"""Bypass the cache when the specified request headers are present, + e.g. Pragma or Authorization headers. Values are case insensitive. + The presence of such a header overrides the cache_mode setting. + + Attributes: + header_name (str): + The header field name to match on when + bypassing cache. Values are case-insensitive. + + This field is a member of `oneof`_ ``_header_name``. + """ + + header_name = proto.Field( + proto.STRING, + number=110223613, + optional=True, + ) + + +class BackendBucketCdnPolicyNegativeCachingPolicy(proto.Message): + r"""Specify CDN TTLs for response error codes. + + Attributes: + code (int): + The HTTP status code to define a TTL against. + Only HTTP status codes 300, 301, 302, 307, 308, + 404, 405, 410, 421, 451 and 501 are can be + specified as values, and you cannot specify a + status code more than once. + + This field is a member of `oneof`_ ``_code``. + ttl (int): + The TTL (in seconds) for which to cache + responses with the corresponding status code. + The maximum allowed value is 1800s (30 minutes), + noting that infrequently accessed objects may be + evicted from the cache before the defined TTL. + + This field is a member of `oneof`_ ``_ttl``. + """ + + code = proto.Field( + proto.INT32, + number=3059181, + optional=True, + ) + ttl = proto.Field( + proto.INT32, + number=115180, + optional=True, + ) + + +class BackendBucketList(proto.Message): + r"""Contains a list of BackendBucket resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.BackendBucket]): + A list of BackendBucket resources. 
+ kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='BackendBucket', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class BackendService(proto.Message): + r"""Represents a Backend Service resource. A backend service defines how + Google Cloud load balancers distribute traffic. The backend service + configuration contains a set of values, such as the protocol used to + connect to backends, various distribution and session settings, + health checks, and timeouts. These settings provide fine-grained + control over how your load balancer behaves. Most of the settings + have default values that allow for easy configuration if you need to + get started quickly. 
Backend services in Google Compute Engine can + be either regionally or globally scoped. \* + `Global `__ + \* + `Regional `__ + For more information, see Backend Services. + + Attributes: + affinity_cookie_ttl_sec (int): + Lifetime of cookies in seconds. This setting is applicable + to external and internal HTTP(S) load balancers and Traffic + Director and requires GENERATED_COOKIE or HTTP_COOKIE + session affinity. If set to 0, the cookie is non-persistent + and lasts only until the end of the browser session (or + equivalent). The maximum allowed value is one day (86,400). + Not supported when the backend service is referenced by a + URL map that is bound to target gRPC proxy that has + validateForProxyless field set to true. + + This field is a member of `oneof`_ ``_affinity_cookie_ttl_sec``. + backends (Sequence[google.cloud.compute_v1.types.Backend]): + The list of backends that serve this + BackendService. + cdn_policy (google.cloud.compute_v1.types.BackendServiceCdnPolicy): + Cloud CDN configuration for this + BackendService. Only available for specified + load balancer types. + + This field is a member of `oneof`_ ``_cdn_policy``. + circuit_breakers (google.cloud.compute_v1.types.CircuitBreakers): + + This field is a member of `oneof`_ ``_circuit_breakers``. + connection_draining (google.cloud.compute_v1.types.ConnectionDraining): + + This field is a member of `oneof`_ ``_connection_draining``. + consistent_hash (google.cloud.compute_v1.types.ConsistentHashLoadBalancerSettings): + Consistent Hash-based load balancing can be used to provide + soft session affinity based on HTTP headers, cookies or + other properties. This load balancing policy is applicable + only for HTTP connections. The affinity to a particular + destination host will be lost when one or more hosts are + added/removed from the destination service. This field + specifies parameters that control consistent hashing. 
This + field is only applicable when localityLbPolicy is set to + MAGLEV or RING_HASH. This field is applicable to either: - A + regional backend service with the service_protocol set to + HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to + INTERNAL_MANAGED. - A global backend service with the + load_balancing_scheme set to INTERNAL_SELF_MANAGED. Not + supported when the backend service is referenced by a URL + map that is bound to target gRPC proxy that has + validateForProxyless field set to true. + + This field is a member of `oneof`_ ``_consistent_hash``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + custom_request_headers (Sequence[str]): + Headers that the load balancer adds to proxied requests. See + `Creating custom + headers `__. + custom_response_headers (Sequence[str]): + Headers that the load balancer adds to proxied responses. + See `Creating custom + headers `__. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + enable_c_d_n (bool): + If true, enables Cloud CDN for the backend + service of an external HTTP(S) load balancer. + + This field is a member of `oneof`_ ``_enable_c_d_n``. + failover_policy (google.cloud.compute_v1.types.BackendServiceFailoverPolicy): + Requires at least one backend instance group to be defined + as a backup (failover) backend. For load balancers that have + configurable failover: `Internal TCP/UDP Load + Balancing `__ + and `external TCP/UDP Load + Balancing `__. + + This field is a member of `oneof`_ ``_failover_policy``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a BackendService. 
An up- + to-date fingerprint must be provided in order to + update the BackendService, otherwise the request + will fail with error 412 conditionNotMet. To see + the latest fingerprint, make a get() request to + retrieve a BackendService. + + This field is a member of `oneof`_ ``_fingerprint``. + health_checks (Sequence[str]): + The list of URLs to the healthChecks, + httpHealthChecks (legacy), or httpsHealthChecks + (legacy) resource for health checking this + backend service. Not all backend services + support legacy health checks. See Load balancer + guide. Currently, at most one health check can + be specified for each backend service. Backend + services with instance group or zonal NEG + backends must have a health check. Backend + services with internet or serverless NEG + backends must not have a health check. + iap (google.cloud.compute_v1.types.BackendServiceIAP): + The configurations for Identity-Aware Proxy + on this resource. Not available for Internal + TCP/UDP Load Balancing and Network Load + Balancing. + + This field is a member of `oneof`_ ``_iap``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. Always + compute#backendService for backend services. + + This field is a member of `oneof`_ ``_kind``. + load_balancing_scheme (str): + Specifies the load balancer type. A backend + service created for one type of load balancer + cannot be used with another. For more + information, refer to Choosing a load balancer. + Check the LoadBalancingScheme enum for the list + of possible values. + + This field is a member of `oneof`_ ``_load_balancing_scheme``. + locality_lb_policy (str): + The load balancing algorithm used within the scope of the + locality. The possible values are: - ROUND_ROBIN: This is a + simple policy in which each healthy backend is selected in + round robin order. 
This is the default. - LEAST_REQUEST: An + O(1) algorithm which selects two random healthy hosts and + picks the host which has fewer active requests. - RING_HASH: + The ring/modulo hash load balancer implements consistent + hashing to backends. The algorithm has the property that the + addition/removal of a host from a set of N hosts only + affects 1/N of the requests. - RANDOM: The load balancer + selects a random healthy host. - ORIGINAL_DESTINATION: + Backend host is selected based on the client connection + metadata, i.e., connections are opened to the same address + as the destination address of the incoming connection before + the connection was redirected to the load balancer. - + MAGLEV: used as a drop in replacement for the ring hash load + balancer. Maglev is not as stable as ring hash but has + faster table lookup build times and host selection times. + For more information about Maglev, see + https://ai.google/research/pubs/pub44824 This field is + applicable to either: - A regional backend service with the + service_protocol set to HTTP, HTTPS, or HTTP2, and + load_balancing_scheme set to INTERNAL_MANAGED. - A global + backend service with the load_balancing_scheme set to + INTERNAL_SELF_MANAGED. If sessionAffinity is not NONE, and + this field is not set to MAGLEV or RING_HASH, session + affinity settings will not take effect. Only the default + ROUND_ROBIN policy is supported when the backend service is + referenced by a URL map that is bound to target gRPC proxy + that has validateForProxyless field set to true. Check the + LocalityLbPolicy enum for the list of possible values. + + This field is a member of `oneof`_ ``_locality_lb_policy``. + log_config (google.cloud.compute_v1.types.BackendServiceLogConfig): + This field denotes the logging options for + the load balancer traffic served by this backend + service. If logging is enabled, logs will be + exported to Stackdriver. + + This field is a member of `oneof`_ ``_log_config``. 
+ max_stream_duration (google.cloud.compute_v1.types.Duration): + Specifies the default maximum duration (timeout) for streams + to this service. Duration is computed from the beginning of + the stream until the response has been completely processed, + including all retries. A stream that does not complete in + this duration is closed. If not specified, there will be no + timeout limit, i.e. the maximum duration is infinite. This + value can be overridden in the PathMatcher configuration of + the UrlMap that references this backend service. This field + is only allowed when the loadBalancingScheme of the backend + service is INTERNAL_SELF_MANAGED. + + This field is a member of `oneof`_ ``_max_stream_duration``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network (str): + The URL of the network to which this backend + service belongs. This field can only be + specified when the load balancing scheme is set + to INTERNAL. + + This field is a member of `oneof`_ ``_network``. + outlier_detection (google.cloud.compute_v1.types.OutlierDetection): + Settings controlling the eviction of unhealthy hosts from + the load balancing pool for the backend service. If not set, + this feature is considered disabled. This field is + applicable to either: - A regional backend service with the + service_protocol set to HTTP, HTTPS, or HTTP2, and + load_balancing_scheme set to INTERNAL_MANAGED. - A global + backend service with the load_balancing_scheme set to + INTERNAL_SELF_MANAGED. 
Not supported when the backend + service is referenced by a URL map that is bound to target + gRPC proxy that has validateForProxyless field set to true. + + This field is a member of `oneof`_ ``_outlier_detection``. + port (int): + Deprecated in favor of portName. The TCP port + to connect on the backend. The default value is + 80. For Internal TCP/UDP Load Balancing and + Network Load Balancing, omit port. + + This field is a member of `oneof`_ ``_port``. + port_name (str): + A named port on a backend instance group representing the + port for communication to the backend VMs in that group. The + named port must be `defined on each backend instance + group `__. + This parameter has no meaning if the backends are NEGs. For + Internal TCP/UDP Load Balancing and Network Load Balancing, + omit port_name. + + This field is a member of `oneof`_ ``_port_name``. + protocol (str): + The protocol this BackendService uses to + communicate with backends. Possible values are + HTTP, HTTPS, HTTP2, TCP, SSL, UDP or GRPC. + depending on the chosen load balancer or Traffic + Director configuration. Refer to the + documentation for the load balancers or for + Traffic Director for more information. Must be + set to GRPC when the backend service is + referenced by a URL map that is bound to target + gRPC proxy. Check the Protocol enum for the list + of possible values. + + This field is a member of `oneof`_ ``_protocol``. + region (str): + [Output Only] URL of the region where the regional backend + service resides. This field is not applicable to global + backend services. You must specify this field as part of the + HTTP request URL. It is not settable as a field in the + request body. + + This field is a member of `oneof`_ ``_region``. + security_policy (str): + [Output Only] The resource URL for the security policy + associated with this backend service. + + This field is a member of `oneof`_ ``_security_policy``. 
+ security_settings (google.cloud.compute_v1.types.SecuritySettings): + This field specifies the security policy that applies to + this backend service. This field is applicable to either: - + A regional backend service with the service_protocol set to + HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to + INTERNAL_MANAGED. - A global backend service with the + load_balancing_scheme set to INTERNAL_SELF_MANAGED. + + This field is a member of `oneof`_ ``_security_settings``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + session_affinity (str): + Type of session affinity to use. The default is NONE. For a + detailed description of session affinity options, see: + `Session + affinity `__. + Not supported when the backend service is referenced by a + URL map that is bound to target gRPC proxy that has + validateForProxyless field set to true. Check the + SessionAffinity enum for the list of possible values. + + This field is a member of `oneof`_ ``_session_affinity``. + subsetting (google.cloud.compute_v1.types.Subsetting): + + This field is a member of `oneof`_ ``_subsetting``. + timeout_sec (int): + Not supported when the backend service is + referenced by a URL map that is bound to target + gRPC proxy that has validateForProxyless field + set to true. Instead, use maxStreamDuration. + + This field is a member of `oneof`_ ``_timeout_sec``. + """ + class LoadBalancingScheme(proto.Enum): + r"""Specifies the load balancer type. A backend service created + for one type of load balancer cannot be used with another. For + more information, refer to Choosing a load balancer. + """ + UNDEFINED_LOAD_BALANCING_SCHEME = 0 + EXTERNAL = 35607499 + INTERNAL = 279295677 + INTERNAL_MANAGED = 37350397 + INTERNAL_SELF_MANAGED = 236211150 + INVALID_LOAD_BALANCING_SCHEME = 275352060 + + class LocalityLbPolicy(proto.Enum): + r"""The load balancing algorithm used within the scope of the locality. 
+ The possible values are: - ROUND_ROBIN: This is a simple policy in + which each healthy backend is selected in round robin order. This is + the default. - LEAST_REQUEST: An O(1) algorithm which selects two + random healthy hosts and picks the host which has fewer active + requests. - RING_HASH: The ring/modulo hash load balancer implements + consistent hashing to backends. The algorithm has the property that + the addition/removal of a host from a set of N hosts only affects + 1/N of the requests. - RANDOM: The load balancer selects a random + healthy host. - ORIGINAL_DESTINATION: Backend host is selected based + on the client connection metadata, i.e., connections are opened to + the same address as the destination address of the incoming + connection before the connection was redirected to the load + balancer. - MAGLEV: used as a drop in replacement for the ring hash + load balancer. Maglev is not as stable as ring hash but has faster + table lookup build times and host selection times. For more + information about Maglev, see + https://ai.google/research/pubs/pub44824 This field is applicable to + either: - A regional backend service with the service_protocol set + to HTTP, HTTPS, or HTTP2, and load_balancing_scheme set to + INTERNAL_MANAGED. - A global backend service with the + load_balancing_scheme set to INTERNAL_SELF_MANAGED. If + sessionAffinity is not NONE, and this field is not set to MAGLEV or + RING_HASH, session affinity settings will not take effect. Only the + default ROUND_ROBIN policy is supported when the backend service is + referenced by a URL map that is bound to target gRPC proxy that has + validateForProxyless field set to true. 
+ """ + UNDEFINED_LOCALITY_LB_POLICY = 0 + INVALID_LB_POLICY = 323318707 + LEAST_REQUEST = 46604921 + MAGLEV = 119180266 + ORIGINAL_DESTINATION = 166297216 + RANDOM = 262527171 + RING_HASH = 432795069 + ROUND_ROBIN = 153895801 + + class Protocol(proto.Enum): + r"""The protocol this BackendService uses to communicate with + backends. Possible values are HTTP, HTTPS, HTTP2, TCP, SSL, UDP + or GRPC. depending on the chosen load balancer or Traffic + Director configuration. Refer to the documentation for the load + balancers or for Traffic Director for more information. Must be + set to GRPC when the backend service is referenced by a URL map + that is bound to target gRPC proxy. + """ + UNDEFINED_PROTOCOL = 0 + GRPC = 2196510 + HTTP = 2228360 + HTTP2 = 69079210 + HTTPS = 69079243 + SSL = 82412 + TCP = 82881 + UDP = 83873 + + class SessionAffinity(proto.Enum): + r"""Type of session affinity to use. The default is NONE. For a detailed + description of session affinity options, see: `Session + affinity `__. + Not supported when the backend service is referenced by a URL map + that is bound to target gRPC proxy that has validateForProxyless + field set to true. 
+ """ + UNDEFINED_SESSION_AFFINITY = 0 + CLIENT_IP = 345665051 + CLIENT_IP_NO_DESTINATION = 106122516 + CLIENT_IP_PORT_PROTO = 221722926 + CLIENT_IP_PROTO = 25322148 + GENERATED_COOKIE = 370321204 + HEADER_FIELD = 200737960 + HTTP_COOKIE = 494981627 + NONE = 2402104 + + affinity_cookie_ttl_sec = proto.Field( + proto.INT32, + number=369996954, + optional=True, + ) + backends = proto.RepeatedField( + proto.MESSAGE, + number=510839903, + message='Backend', + ) + cdn_policy = proto.Field( + proto.MESSAGE, + number=213976452, + optional=True, + message='BackendServiceCdnPolicy', + ) + circuit_breakers = proto.Field( + proto.MESSAGE, + number=421340061, + optional=True, + message='CircuitBreakers', + ) + connection_draining = proto.Field( + proto.MESSAGE, + number=461096747, + optional=True, + message='ConnectionDraining', + ) + consistent_hash = proto.Field( + proto.MESSAGE, + number=905883, + optional=True, + message='ConsistentHashLoadBalancerSettings', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + custom_request_headers = proto.RepeatedField( + proto.STRING, + number=27977992, + ) + custom_response_headers = proto.RepeatedField( + proto.STRING, + number=387539094, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + enable_c_d_n = proto.Field( + proto.BOOL, + number=250733499, + optional=True, + ) + failover_policy = proto.Field( + proto.MESSAGE, + number=105658655, + optional=True, + message='BackendServiceFailoverPolicy', + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + health_checks = proto.RepeatedField( + proto.STRING, + number=448370606, + ) + iap = proto.Field( + proto.MESSAGE, + number=104024, + optional=True, + message='BackendServiceIAP', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + load_balancing_scheme = 
proto.Field( + proto.STRING, + number=363890244, + optional=True, + ) + locality_lb_policy = proto.Field( + proto.STRING, + number=131431487, + optional=True, + ) + log_config = proto.Field( + proto.MESSAGE, + number=351299741, + optional=True, + message='BackendServiceLogConfig', + ) + max_stream_duration = proto.Field( + proto.MESSAGE, + number=61428376, + optional=True, + message='Duration', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + outlier_detection = proto.Field( + proto.MESSAGE, + number=354625086, + optional=True, + message='OutlierDetection', + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + protocol = proto.Field( + proto.STRING, + number=84577944, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + security_policy = proto.Field( + proto.STRING, + number=171082513, + optional=True, + ) + security_settings = proto.Field( + proto.MESSAGE, + number=478649922, + optional=True, + message='SecuritySettings', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + session_affinity = proto.Field( + proto.STRING, + number=463888561, + optional=True, + ) + subsetting = proto.Field( + proto.MESSAGE, + number=450283536, + optional=True, + message='Subsetting', + ) + timeout_sec = proto.Field( + proto.INT32, + number=79994995, + optional=True, + ) + + +class BackendServiceAggregatedList(proto.Message): + r"""Contains a list of BackendServicesScopedList. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.BackendServiceAggregatedList.ItemsEntry]): + A list of BackendServicesScopedList + resources. 
+ kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='BackendServicesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class BackendServiceCdnPolicy(proto.Message): + r"""Message containing Cloud CDN configuration for a backend + service. + + Attributes: + bypass_cache_on_request_headers (Sequence[google.cloud.compute_v1.types.BackendServiceCdnPolicyBypassCacheOnRequestHeader]): + Bypass the cache when the specified request + headers are matched - e.g. Pragma or + Authorization headers. Up to 5 headers can be + specified. 
The cache is bypassed for all + cdnPolicy.cacheMode settings. + cache_key_policy (google.cloud.compute_v1.types.CacheKeyPolicy): + The CacheKeyPolicy for this CdnPolicy. + + This field is a member of `oneof`_ ``_cache_key_policy``. + cache_mode (str): + Specifies the cache setting for all responses from this + backend. The possible values are: USE_ORIGIN_HEADERS + Requires the origin to set valid caching headers to cache + content. Responses without these headers will not be cached + at Google's edge, and will require a full trip to the origin + on every request, potentially impacting performance and + increasing load on the origin server. FORCE_CACHE_ALL Cache + all content, ignoring any "private", "no-store" or + "no-cache" directives in Cache-Control response headers. + Warning: this may result in Cloud CDN caching private, + per-user (user identifiable) content. CACHE_ALL_STATIC + Automatically cache static content, including common image + formats, media (video and audio), and web assets (JavaScript + and CSS). Requests and responses that are marked as + uncacheable, as well as dynamic content (including HTML), + will not be cached. Check the CacheMode enum for the list of + possible values. + + This field is a member of `oneof`_ ``_cache_mode``. + client_ttl (int): + Specifies a separate client (e.g. browser client) maximum + TTL. This is used to clamp the max-age (or Expires) value + sent to the client. With FORCE_CACHE_ALL, the lesser of + client_ttl and default_ttl is used for the response max-age + directive, along with a "public" directive. For cacheable + content in CACHE_ALL_STATIC mode, client_ttl clamps the + max-age from the origin (if specified), or else sets the + response max-age directive to the lesser of the client_ttl + and default_ttl, and also ensures a "public" cache-control + directive is present. If a client TTL is not specified, a + default value (1 hour) will be used. The maximum allowed + value is 86400s (1 day). 
+ + This field is a member of `oneof`_ ``_client_ttl``. + default_ttl (int): + Specifies the default TTL for cached content served by this + origin for responses that do not have an existing valid TTL + (max-age or s-max-age). Setting a TTL of "0" means "always + revalidate". The value of defaultTTL cannot be set to a + value greater than that of maxTTL, but can be equal. When + the cacheMode is set to FORCE_CACHE_ALL, the defaultTTL will + overwrite the TTL set in all responses. The maximum allowed + value is 31,622,400s (1 year), noting that infrequently + accessed objects may be evicted from the cache before the + defined TTL. + + This field is a member of `oneof`_ ``_default_ttl``. + max_ttl (int): + Specifies the maximum allowed TTL for cached + content served by this origin. Cache directives + that attempt to set a max-age or s-maxage higher + than this, or an Expires header more than maxTTL + seconds in the future will be capped at the + value of maxTTL, as if it were the value of an + s-maxage Cache-Control directive. Headers sent + to the client will not be modified. Setting a + TTL of "0" means "always revalidate". The + maximum allowed value is 31,622,400s (1 year), + noting that infrequently accessed objects may be + evicted from the cache before the defined TTL. + + This field is a member of `oneof`_ ``_max_ttl``. + negative_caching (bool): + Negative caching allows per-status code TTLs to be set, in + order to apply fine-grained caching for common errors or + redirects. This can reduce the load on your origin and + improve end-user experience by reducing response latency. + When the cache mode is set to CACHE_ALL_STATIC or + USE_ORIGIN_HEADERS, negative caching applies to responses + with the specified response code that lack any + Cache-Control, Expires, or Pragma: no-cache directives. When + the cache mode is set to FORCE_CACHE_ALL, negative caching + applies to all responses with the specified response code, + and override any caching headers. 
By default, Cloud CDN will + apply the following default TTLs to these status codes: HTTP + 300 (Multiple Choice), 301, 308 (Permanent Redirects): 10m + HTTP 404 (Not Found), 410 (Gone), 451 (Unavailable For Legal + Reasons): 120s HTTP 405 (Method Not Found), 421 (Misdirected + Request), 501 (Not Implemented): 60s. These defaults can be + overridden in negative_caching_policy. + + This field is a member of `oneof`_ ``_negative_caching``. + negative_caching_policy (Sequence[google.cloud.compute_v1.types.BackendServiceCdnPolicyNegativeCachingPolicy]): + Sets a cache TTL for the specified HTTP status code. + negative_caching must be enabled to configure + negative_caching_policy. Omitting the policy and leaving + negative_caching enabled will use Cloud CDN's default cache + TTLs. Note that when specifying an explicit + negative_caching_policy, you should take care to specify a + cache TTL for all response codes that you wish to cache. + Cloud CDN will not apply any default negative caching when a + policy exists. + request_coalescing (bool): + If true then Cloud CDN will combine multiple + concurrent cache fill requests into a small + number of requests to the origin. + + This field is a member of `oneof`_ ``_request_coalescing``. + serve_while_stale (int): + Serve existing content from the cache (if + available) when revalidating content with the + origin, or when an error is encountered when + refreshing the cache. This setting defines the + default "max-stale" duration for any cached + responses that do not specify a max-stale + directive. Stale responses that exceed the TTL + configured here will not be served. The default + limit (max-stale) is 86400s (1 day), which will + allow stale content to be served up to this + limit beyond the max-age (or s-max-age) of a + cached response. The maximum allowed value is + 604800 (1 week). Set this to zero (0) to disable + serve-while-stale. + + This field is a member of `oneof`_ ``_serve_while_stale``. 
+ signed_url_cache_max_age_sec (int): + Maximum number of seconds the response to a signed URL + request will be considered fresh. After this time period, + the response will be revalidated before being served. + Defaults to 1hr (3600s). When serving responses to signed + URL requests, Cloud CDN will internally behave as though all + responses from this backend had a "Cache-Control: public, + max-age=[TTL]" header, regardless of any existing + Cache-Control header. The actual headers served in responses + will not be altered. + + This field is a member of `oneof`_ ``_signed_url_cache_max_age_sec``. + signed_url_key_names (Sequence[str]): + [Output Only] Names of the keys for signing request URLs. + """ + class CacheMode(proto.Enum): + r"""Specifies the cache setting for all responses from this backend. The + possible values are: USE_ORIGIN_HEADERS Requires the origin to set + valid caching headers to cache content. Responses without these + headers will not be cached at Google's edge, and will require a full + trip to the origin on every request, potentially impacting + performance and increasing load on the origin server. + FORCE_CACHE_ALL Cache all content, ignoring any "private", + "no-store" or "no-cache" directives in Cache-Control response + headers. Warning: this may result in Cloud CDN caching private, + per-user (user identifiable) content. CACHE_ALL_STATIC Automatically + cache static content, including common image formats, media (video + and audio), and web assets (JavaScript and CSS). Requests and + responses that are marked as uncacheable, as well as dynamic content + (including HTML), will not be cached. 
+ """ + UNDEFINED_CACHE_MODE = 0 + CACHE_ALL_STATIC = 355027945 + FORCE_CACHE_ALL = 486026928 + INVALID_CACHE_MODE = 381295560 + USE_ORIGIN_HEADERS = 55380261 + + bypass_cache_on_request_headers = proto.RepeatedField( + proto.MESSAGE, + number=486203082, + message='BackendServiceCdnPolicyBypassCacheOnRequestHeader', + ) + cache_key_policy = proto.Field( + proto.MESSAGE, + number=159263727, + optional=True, + message='CacheKeyPolicy', + ) + cache_mode = proto.Field( + proto.STRING, + number=28877888, + optional=True, + ) + client_ttl = proto.Field( + proto.INT32, + number=29034360, + optional=True, + ) + default_ttl = proto.Field( + proto.INT32, + number=100253422, + optional=True, + ) + max_ttl = proto.Field( + proto.INT32, + number=307578001, + optional=True, + ) + negative_caching = proto.Field( + proto.BOOL, + number=336110005, + optional=True, + ) + negative_caching_policy = proto.RepeatedField( + proto.MESSAGE, + number=155359996, + message='BackendServiceCdnPolicyNegativeCachingPolicy', + ) + request_coalescing = proto.Field( + proto.BOOL, + number=532808276, + optional=True, + ) + serve_while_stale = proto.Field( + proto.INT32, + number=236682203, + optional=True, + ) + signed_url_cache_max_age_sec = proto.Field( + proto.INT64, + number=269374534, + optional=True, + ) + signed_url_key_names = proto.RepeatedField( + proto.STRING, + number=371848885, + ) + + +class BackendServiceCdnPolicyBypassCacheOnRequestHeader(proto.Message): + r"""Bypass the cache when the specified request headers are present, + e.g. Pragma or Authorization headers. Values are case insensitive. + The presence of such a header overrides the cache_mode setting. + + Attributes: + header_name (str): + The header field name to match on when + bypassing cache. Values are case-insensitive. + + This field is a member of `oneof`_ ``_header_name``. 
+ """ + + header_name = proto.Field( + proto.STRING, + number=110223613, + optional=True, + ) + + +class BackendServiceCdnPolicyNegativeCachingPolicy(proto.Message): + r"""Specify CDN TTLs for response error codes. + + Attributes: + code (int): + The HTTP status code to define a TTL against. + Only HTTP status codes 300, 301, 302, 307, 308, + 404, 405, 410, 421, 451 and 501 are can be + specified as values, and you cannot specify a + status code more than once. + + This field is a member of `oneof`_ ``_code``. + ttl (int): + The TTL (in seconds) for which to cache + responses with the corresponding status code. + The maximum allowed value is 1800s (30 minutes), + noting that infrequently accessed objects may be + evicted from the cache before the defined TTL. + + This field is a member of `oneof`_ ``_ttl``. + """ + + code = proto.Field( + proto.INT32, + number=3059181, + optional=True, + ) + ttl = proto.Field( + proto.INT32, + number=115180, + optional=True, + ) + + +class BackendServiceFailoverPolicy(proto.Message): + r"""For load balancers that have configurable failover: `Internal + TCP/UDP Load + Balancing `__ + and `external TCP/UDP Load + Balancing `__. On failover or + failback, this field indicates whether connection draining will be + honored. Google Cloud has a fixed connection draining timeout of 10 + minutes. A setting of true terminates existing TCP connections to + the active pool during failover and failback, immediately draining + traffic. A setting of false allows existing TCP connections to + persist, even on VMs no longer in the active pool, for up to the + duration of the connection draining timeout (10 minutes). + + Attributes: + disable_connection_drain_on_failover (bool): + This can be set to true only if the protocol + is TCP. The default is false. + + This field is a member of `oneof`_ ``_disable_connection_drain_on_failover``. 
+ drop_traffic_if_unhealthy (bool): + If set to true, connections to the load balancer are dropped + when all primary and all backup backend VMs are unhealthy.If + set to false, connections are distributed among all primary + VMs when all primary and all backup backend VMs are + unhealthy. For load balancers that have configurable + failover: `Internal TCP/UDP Load + Balancing `__ + and `external TCP/UDP Load + Balancing `__. The + default is false. + + This field is a member of `oneof`_ ``_drop_traffic_if_unhealthy``. + failover_ratio (float): + The value of the field must be in the range [0, 1]. If the + value is 0, the load balancer performs a failover when the + number of healthy primary VMs equals zero. For all other + values, the load balancer performs a failover when the total + number of healthy primary VMs is less than this ratio. For + load balancers that have configurable failover: `Internal + TCP/UDP Load + Balancing `__ + and `external TCP/UDP Load + Balancing `__. + + This field is a member of `oneof`_ ``_failover_ratio``. + """ + + disable_connection_drain_on_failover = proto.Field( + proto.BOOL, + number=182150753, + optional=True, + ) + drop_traffic_if_unhealthy = proto.Field( + proto.BOOL, + number=112289428, + optional=True, + ) + failover_ratio = proto.Field( + proto.FLOAT, + number=212667006, + optional=True, + ) + + +class BackendServiceGroupHealth(proto.Message): + r""" + + Attributes: + annotations (Sequence[google.cloud.compute_v1.types.BackendServiceGroupHealth.AnnotationsEntry]): + Metadata defined as annotations on the + network endpoint group. + health_status (Sequence[google.cloud.compute_v1.types.HealthStatus]): + Health state of the backend instances or + endpoints in requested instance or network + endpoint group, determined based on configured + health checks. + kind (str): + [Output Only] Type of resource. Always + compute#backendServiceGroupHealth for the health of backend + services. 
+ + This field is a member of `oneof`_ ``_kind``. + """ + + annotations = proto.MapField( + proto.STRING, + proto.STRING, + number=112032548, + ) + health_status = proto.RepeatedField( + proto.MESSAGE, + number=380545845, + message='HealthStatus', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + + +class BackendServiceIAP(proto.Message): + r"""Identity-Aware Proxy + + Attributes: + enabled (bool): + Whether the serving infrastructure will + authenticate and authorize all incoming + requests. If true, the oauth2ClientId and + oauth2ClientSecret fields must be non-empty. + + This field is a member of `oneof`_ ``_enabled``. + oauth2_client_id (str): + OAuth2 client ID to use for the + authentication flow. + + This field is a member of `oneof`_ ``_oauth2_client_id``. + oauth2_client_secret (str): + OAuth2 client secret to use for the + authentication flow. For security reasons, this + value cannot be retrieved via the API. Instead, + the SHA-256 hash of the value is returned in the + oauth2ClientSecretSha256 field. @InputOnly + + This field is a member of `oneof`_ ``_oauth2_client_secret``. + oauth2_client_secret_sha256 (str): + [Output Only] SHA256 hash value for the field + oauth2_client_secret above. + + This field is a member of `oneof`_ ``_oauth2_client_secret_sha256``. + """ + + enabled = proto.Field( + proto.BOOL, + number=1018689, + optional=True, + ) + oauth2_client_id = proto.Field( + proto.STRING, + number=314017611, + optional=True, + ) + oauth2_client_secret = proto.Field( + proto.STRING, + number=50999520, + optional=True, + ) + oauth2_client_secret_sha256 = proto.Field( + proto.STRING, + number=112903782, + optional=True, + ) + + +class BackendServiceList(proto.Message): + r"""Contains a list of BackendService resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.BackendService]): + A list of BackendService resources. + kind (str): + [Output Only] Type of resource. Always + compute#backendServiceList for lists of backend services. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='BackendService', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class BackendServiceLogConfig(proto.Message): + r"""The available logging options for the load balancer traffic + served by this backend service. + + Attributes: + enable (bool): + This field denotes whether to enable logging + for the load balancer traffic served by this + backend service. + + This field is a member of `oneof`_ ``_enable``. 
+ sample_rate (float): + This field can only be specified if logging is enabled for + this backend service. The value of the field must be in [0, + 1]. This configures the sampling rate of requests to the + load balancer where 1.0 means all logged requests are + reported and 0.0 means no logged requests are reported. The + default value is 1.0. + + This field is a member of `oneof`_ ``_sample_rate``. + """ + + enable = proto.Field( + proto.BOOL, + number=311764355, + optional=True, + ) + sample_rate = proto.Field( + proto.FLOAT, + number=153193045, + optional=True, + ) + + +class BackendServiceReference(proto.Message): + r""" + + Attributes: + backend_service (str): + + This field is a member of `oneof`_ ``_backend_service``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + optional=True, + ) + + +class BackendServicesScopedList(proto.Message): + r""" + + Attributes: + backend_services (Sequence[google.cloud.compute_v1.types.BackendService]): + A list of BackendServices contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of backend services when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + backend_services = proto.RepeatedField( + proto.MESSAGE, + number=388522409, + message='BackendService', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class Binding(proto.Message): + r"""Associates ``members`` with a ``role``. + + Attributes: + binding_id (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_binding_id``. + condition (google.cloud.compute_v1.types.Expr): + The condition that is associated with this binding. If the + condition evaluates to ``true``, then this binding applies + to the current request. If the condition evaluates to + ``false``, then this binding does not apply to the current + request. 
However, a different role binding might grant the + same role to one or more of the members in this binding. To + learn which resources support conditions in their IAM + policies, see the `IAM + documentation `__. + + This field is a member of `oneof`_ ``_condition``. + members (Sequence[str]): + Specifies the identities requesting access for a Cloud + Platform resource. ``members`` can have the following + values: \* ``allUsers``: A special identifier that + represents anyone who is on the internet; with or without a + Google account. \* ``allAuthenticatedUsers``: A special + identifier that represents anyone who is authenticated with + a Google account or a service account. \* + ``user:{emailid}``: An email address that represents a + specific Google account. For example, ``alice@example.com`` + . \* ``serviceAccount:{emailid}``: An email address that + represents a service account. For example, + ``my-other-app@appspot.gserviceaccount.com``. \* + ``group:{emailid}``: An email address that represents a + Google group. For example, ``admins@example.com``. \* + ``deleted:user:{emailid}?uid={uniqueid}``: An email address + (plus unique identifier) representing a user that has been + recently deleted. For example, + ``alice@example.com?uid=123456789012345678901``. If the user + is recovered, this value reverts to ``user:{emailid}`` and + the recovered user retains the role in the binding. \* + ``deleted:serviceAccount:{emailid}?uid={uniqueid}``: An + email address (plus unique identifier) representing a + service account that has been recently deleted. For example, + ``my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901``. + If the service account is undeleted, this value reverts to + ``serviceAccount:{emailid}`` and the undeleted service + account retains the role in the binding. \* + ``deleted:group:{emailid}?uid={uniqueid}``: An email address + (plus unique identifier) representing a Google group that + has been recently deleted. 
For example, + ``admins@example.com?uid=123456789012345678901``. If the + group is recovered, this value reverts to + ``group:{emailid}`` and the recovered group retains the role + in the binding. \* ``domain:{domain}``: The G Suite domain + (primary) that represents all the users of that domain. For + example, ``google.com`` or ``example.com``. + role (str): + Role that is assigned to ``members``. For example, + ``roles/viewer``, ``roles/editor``, or ``roles/owner``. + + This field is a member of `oneof`_ ``_role``. + """ + + binding_id = proto.Field( + proto.STRING, + number=441088277, + optional=True, + ) + condition = proto.Field( + proto.MESSAGE, + number=212430107, + optional=True, + message='Expr', + ) + members = proto.RepeatedField( + proto.STRING, + number=412010777, + ) + role = proto.Field( + proto.STRING, + number=3506294, + optional=True, + ) + + +class BulkInsertInstanceRequest(proto.Message): + r"""A request message for Instances.BulkInsert. See the method + description for details. + + Attributes: + bulk_insert_instance_resource_resource (google.cloud.compute_v1.types.BulkInsertInstanceResource): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ zone (str): + The name of the zone for this request. + """ + + bulk_insert_instance_resource_resource = proto.Field( + proto.MESSAGE, + number=41427278, + message='BulkInsertInstanceResource', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class BulkInsertInstanceResource(proto.Message): + r"""A transient resource used in compute.instances.bulkInsert and + compute.regionInstances.bulkInsert . This resource is not + persisted anywhere, it is used only for processing the requests. + + Attributes: + count (int): + The maximum number of instances to create. + + This field is a member of `oneof`_ ``_count``. + instance_properties (google.cloud.compute_v1.types.InstanceProperties): + The instance properties defining the VM + instances to be created. Required if + sourceInstanceTemplate is not provided. + + This field is a member of `oneof`_ ``_instance_properties``. + location_policy (google.cloud.compute_v1.types.LocationPolicy): + Policy for chosing target zone. + + This field is a member of `oneof`_ ``_location_policy``. + min_count (int): + The minimum number of instances to create. If no min_count + is specified then count is used as the default value. If + min_count instances cannot be created, then no instances + will be created and instances already created will be + deleted. + + This field is a member of `oneof`_ ``_min_count``. + name_pattern (str): + The string pattern used for the names of the VMs. Either + name_pattern or per_instance_properties must be set. The + pattern must contain one continuous sequence of placeholder + hash characters (#) with each character corresponding to one + digit of the generated instance name. Example: a + name_pattern of inst-#### generates instance names such as + inst-0001 and inst-0002. 
If existing instances in the same + project and zone have names that match the name pattern then + the generated instance numbers start after the biggest + existing number. For example, if there exists an instance + with name inst-0050, then instance names generated using the + pattern inst-#### begin with inst-0051. The name pattern + placeholder #...# can contain up to 18 characters. + + This field is a member of `oneof`_ ``_name_pattern``. + per_instance_properties (Sequence[google.cloud.compute_v1.types.BulkInsertInstanceResource.PerInstancePropertiesEntry]): + Per-instance properties to be set on individual instances. + Keys of this map specify requested instance names. Can be + empty if name_pattern is used. + source_instance_template (str): + Specifies the instance template from which to + create instances. You may combine + sourceInstanceTemplate with instanceProperties + to override specific values from an existing + instance template. Bulk API follows the + semantics of JSON Merge Patch described by RFC + 7396. It can be a full or partial URL. For + example, the following are all valid URLs to an + instance template: - + https://www.googleapis.com/compute/v1/projects/project + /global/instanceTemplates/instanceTemplate - + projects/project/global/instanceTemplates/instanceTemplate + - global/instanceTemplates/instanceTemplate This + field is optional. + + This field is a member of `oneof`_ ``_source_instance_template``. 
+ """ + + count = proto.Field( + proto.INT64, + number=94851343, + optional=True, + ) + instance_properties = proto.Field( + proto.MESSAGE, + number=215355165, + optional=True, + message='InstanceProperties', + ) + location_policy = proto.Field( + proto.MESSAGE, + number=465689852, + optional=True, + message='LocationPolicy', + ) + min_count = proto.Field( + proto.INT64, + number=523228386, + optional=True, + ) + name_pattern = proto.Field( + proto.STRING, + number=413815260, + optional=True, + ) + per_instance_properties = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=108502267, + message='BulkInsertInstanceResourcePerInstanceProperties', + ) + source_instance_template = proto.Field( + proto.STRING, + number=332423616, + optional=True, + ) + + +class BulkInsertInstanceResourcePerInstanceProperties(proto.Message): + r"""Per-instance properties to be set on individual instances. To + be extended in the future. + + Attributes: + name (str): + This field is only temporary. It will be + removed. Do not use it. + + This field is a member of `oneof`_ ``_name``. + """ + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + + +class BulkInsertRegionInstanceRequest(proto.Message): + r"""A request message for RegionInstances.BulkInsert. See the + method description for details. + + Attributes: + bulk_insert_instance_resource_resource (google.cloud.compute_v1.types.BulkInsertInstanceResource): + The body resource for this request + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + bulk_insert_instance_resource_resource = proto.Field( + proto.MESSAGE, + number=41427278, + message='BulkInsertInstanceResource', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class CacheInvalidationRule(proto.Message): + r""" + + Attributes: + host (str): + If set, this invalidation rule will only + apply to requests with a Host header matching + host. + + This field is a member of `oneof`_ ``_host``. + path (str): + + This field is a member of `oneof`_ ``_path``. + """ + + host = proto.Field( + proto.STRING, + number=3208616, + optional=True, + ) + path = proto.Field( + proto.STRING, + number=3433509, + optional=True, + ) + + +class CacheKeyPolicy(proto.Message): + r"""Message containing what to include in the cache key for a + request for Cloud CDN. + + Attributes: + include_host (bool): + If true, requests to different hosts will be + cached separately. + + This field is a member of `oneof`_ ``_include_host``. + include_protocol (bool): + If true, http and https requests will be + cached separately. + + This field is a member of `oneof`_ ``_include_protocol``. + include_query_string (bool): + If true, include query string parameters in the cache key + according to query_string_whitelist and + query_string_blacklist. If neither is set, the entire query + string will be included. 
If false, the query string will be + excluded from the cache key entirely. + + This field is a member of `oneof`_ ``_include_query_string``. + query_string_blacklist (Sequence[str]): + Names of query string parameters to exclude in cache keys. + All other parameters will be included. Either specify + query_string_whitelist or query_string_blacklist, not both. + '&' and '=' will be percent encoded and not treated as + delimiters. + query_string_whitelist (Sequence[str]): + Names of query string parameters to include in cache keys. + All other parameters will be excluded. Either specify + query_string_whitelist or query_string_blacklist, not both. + '&' and '=' will be percent encoded and not treated as + delimiters. + """ + + include_host = proto.Field( + proto.BOOL, + number=486867679, + optional=True, + ) + include_protocol = proto.Field( + proto.BOOL, + number=303507535, + optional=True, + ) + include_query_string = proto.Field( + proto.BOOL, + number=474036639, + optional=True, + ) + query_string_blacklist = proto.RepeatedField( + proto.STRING, + number=354964742, + ) + query_string_whitelist = proto.RepeatedField( + proto.STRING, + number=52456496, + ) + + +class CircuitBreakers(proto.Message): + r"""Settings controlling the volume of requests, connections and + retries to this backend service. + + Attributes: + max_connections (int): + Not supported when the backend service is + referenced by a URL map that is bound to target + gRPC proxy that has validateForProxyless field + set to true. + + This field is a member of `oneof`_ ``_max_connections``. + max_pending_requests (int): + Not supported when the backend service is + referenced by a URL map that is bound to target + gRPC proxy that has validateForProxyless field + set to true. + + This field is a member of `oneof`_ ``_max_pending_requests``. + max_requests (int): + The maximum number of parallel requests that + allowed to the backend service. If not + specified, there is no limit. 
+ + This field is a member of `oneof`_ ``_max_requests``. + max_requests_per_connection (int): + Not supported when the backend service is + referenced by a URL map that is bound to target + gRPC proxy that has validateForProxyless field + set to true. + + This field is a member of `oneof`_ ``_max_requests_per_connection``. + max_retries (int): + Not supported when the backend service is + referenced by a URL map that is bound to target + gRPC proxy that has validateForProxyless field + set to true. + + This field is a member of `oneof`_ ``_max_retries``. + """ + + max_connections = proto.Field( + proto.INT32, + number=110652154, + optional=True, + ) + max_pending_requests = proto.Field( + proto.INT32, + number=375558887, + optional=True, + ) + max_requests = proto.Field( + proto.INT32, + number=28097599, + optional=True, + ) + max_requests_per_connection = proto.Field( + proto.INT32, + number=361630528, + optional=True, + ) + max_retries = proto.Field( + proto.INT32, + number=55546219, + optional=True, + ) + + +class CloneRulesFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.CloneRules. See the + method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + source_firewall_policy (str): + The firewall policy from which to copy rules. + + This field is a member of `oneof`_ ``_source_firewall_policy``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + source_firewall_policy = proto.Field( + proto.STRING, + number=25013549, + optional=True, + ) + + +class Commitment(proto.Message): + r"""Represents a regional Commitment resource. Creating a + commitment resource means that you are purchasing a committed + use contract with an explicit start and end time. You can create + commitments based on vCPUs and memory usage and receive + discounted rates. For full details, read Signing Up for + Committed Use Discounts. + + Attributes: + category (str): + The category of the commitment. Category + MACHINE specifies commitments composed of + machine resources such as VCPU or MEMORY, listed + in resources. Category LICENSE specifies + commitments composed of software licenses, + listed in licenseResources. Note that only + MACHINE commitments should have a Type + specified. Check the Category enum for the list + of possible values. + + This field is a member of `oneof`_ ``_category``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + end_timestamp (str): + [Output Only] Commitment end time in RFC3339 text format. + + This field is a member of `oneof`_ ``_end_timestamp``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. 
Always + compute#commitment for commitments. + + This field is a member of `oneof`_ ``_kind``. + license_resource (google.cloud.compute_v1.types.LicenseResourceCommitment): + The license specification required as part of + a license commitment. + + This field is a member of `oneof`_ ``_license_resource``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + plan (str): + The plan for this commitment, which determines duration and + discount rate. The currently supported plans are + TWELVE_MONTH (1 year), and THIRTY_SIX_MONTH (3 years). Check + the Plan enum for the list of possible values. + + This field is a member of `oneof`_ ``_plan``. + region (str): + [Output Only] URL of the region where this commitment may be + used. + + This field is a member of `oneof`_ ``_region``. + reservations (Sequence[google.cloud.compute_v1.types.Reservation]): + List of reservations in this commitment. + resources (Sequence[google.cloud.compute_v1.types.ResourceCommitment]): + A list of commitment amounts for particular + resources. Note that VCPU and MEMORY resource + commitments must occur together. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + start_timestamp (str): + [Output Only] Commitment start time in RFC3339 text format. + + This field is a member of `oneof`_ ``_start_timestamp``. + status (str): + [Output Only] Status of the commitment with regards to + eventual expiration (each commitment has an end date + defined). 
One of the following values: NOT_YET_ACTIVE, + ACTIVE, EXPIRED. Check the Status enum for the list of + possible values. + + This field is a member of `oneof`_ ``_status``. + status_message (str): + [Output Only] An optional, human-readable explanation of the + status. + + This field is a member of `oneof`_ ``_status_message``. + type_ (str): + The type of commitment, which affects the discount rate and + the eligible resources. Type MEMORY_OPTIMIZED specifies a + commitment that will only apply to memory optimized + machines. Type ACCELERATOR_OPTIMIZED specifies a commitment + that will only apply to accelerator optimized machines. + Check the Type enum for the list of possible values. + + This field is a member of `oneof`_ ``_type``. + """ + class Category(proto.Enum): + r"""The category of the commitment. Category MACHINE specifies + commitments composed of machine resources such as VCPU or + MEMORY, listed in resources. Category LICENSE specifies + commitments composed of software licenses, listed in + licenseResources. Note that only MACHINE commitments should have + a Type specified. + """ + UNDEFINED_CATEGORY = 0 + CATEGORY_UNSPECIFIED = 509189462 + LICENSE = 347869217 + MACHINE = 469553191 + + class Plan(proto.Enum): + r"""The plan for this commitment, which determines duration and discount + rate. The currently supported plans are TWELVE_MONTH (1 year), and + THIRTY_SIX_MONTH (3 years). + """ + UNDEFINED_PLAN = 0 + INVALID = 530283991 + THIRTY_SIX_MONTH = 266295942 + TWELVE_MONTH = 173083962 + + class Status(proto.Enum): + r"""[Output Only] Status of the commitment with regards to eventual + expiration (each commitment has an end date defined). One of the + following values: NOT_YET_ACTIVE, ACTIVE, EXPIRED. + """ + UNDEFINED_STATUS = 0 + ACTIVE = 314733318 + CREATING = 455564985 + EXPIRED = 482489093 + NOT_YET_ACTIVE = 20607337 + + class Type(proto.Enum): + r"""The type of commitment, which affects the discount rate and the + eligible resources. 
Type MEMORY_OPTIMIZED specifies a commitment + that will only apply to memory optimized machines. Type + ACCELERATOR_OPTIMIZED specifies a commitment that will only apply to + accelerator optimized machines. + """ + UNDEFINED_TYPE = 0 + ACCELERATOR_OPTIMIZED = 280848403 + COMPUTE_OPTIMIZED = 158349023 + GENERAL_PURPOSE = 299793543 + GENERAL_PURPOSE_E2 = 301911877 + GENERAL_PURPOSE_N2 = 301912156 + GENERAL_PURPOSE_N2D = 232471400 + MEMORY_OPTIMIZED = 281753417 + TYPE_UNSPECIFIED = 437714322 + + category = proto.Field( + proto.STRING, + number=50511102, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + end_timestamp = proto.Field( + proto.STRING, + number=468096690, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + license_resource = proto.Field( + proto.MESSAGE, + number=437955148, + optional=True, + message='LicenseResourceCommitment', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + plan = proto.Field( + proto.STRING, + number=3443497, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + reservations = proto.RepeatedField( + proto.MESSAGE, + number=399717927, + message='Reservation', + ) + resources = proto.RepeatedField( + proto.MESSAGE, + number=164412965, + message='ResourceCommitment', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + start_timestamp = proto.Field( + proto.STRING, + number=83645817, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + status_message = proto.Field( + proto.STRING, + number=297428154, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + 
+class CommitmentAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.CommitmentAggregatedList.ItemsEntry]): + A list of CommitmentsScopedList resources. + kind (str): + [Output Only] Type of resource. Always + compute#commitmentAggregatedList for aggregated lists of + commitments. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='CommitmentsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class CommitmentList(proto.Message): + r"""Contains a list of Commitment resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Commitment]): + A list of Commitment resources. + kind (str): + [Output Only] Type of resource. Always + compute#commitmentList for lists of commitments. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Commitment', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class CommitmentsScopedList(proto.Message): + r""" + + Attributes: + commitments (Sequence[google.cloud.compute_v1.types.Commitment]): + [Output Only] A list of commitments contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of commitments when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + commitments = proto.RepeatedField( + proto.MESSAGE, + number=450664446, + message='Commitment', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class Condition(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + iam (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_iam``. + op (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_op``. + svc (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_svc``. + sys (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_sys``. + values (Sequence[str]): + This is deprecated and has no effect. Do not + use. 
+ """ + + iam = proto.Field( + proto.STRING, + number=104021, + optional=True, + ) + op = proto.Field( + proto.STRING, + number=3553, + optional=True, + ) + svc = proto.Field( + proto.STRING, + number=114272, + optional=True, + ) + sys = proto.Field( + proto.STRING, + number=114381, + optional=True, + ) + values = proto.RepeatedField( + proto.STRING, + number=249928994, + ) + + +class ConfidentialInstanceConfig(proto.Message): + r"""A set of Confidential Instance options. + + Attributes: + enable_confidential_compute (bool): + Defines whether the instance should have + confidential compute enabled. + + This field is a member of `oneof`_ ``_enable_confidential_compute``. + """ + + enable_confidential_compute = proto.Field( + proto.BOOL, + number=102135228, + optional=True, + ) + + +class ConnectionDraining(proto.Message): + r"""Message containing connection draining configuration. + + Attributes: + draining_timeout_sec (int): + Configures a duration timeout for existing + requests on a removed backend instance. For + supported load balancers and protocols, as + described in Enabling connection draining. + + This field is a member of `oneof`_ ``_draining_timeout_sec``. + """ + + draining_timeout_sec = proto.Field( + proto.INT32, + number=225127070, + optional=True, + ) + + +class ConsistentHashLoadBalancerSettings(proto.Message): + r"""This message defines settings for a consistent hash style + load balancer. + + Attributes: + http_cookie (google.cloud.compute_v1.types.ConsistentHashLoadBalancerSettingsHttpCookie): + Hash is based on HTTP Cookie. This field describes a HTTP + cookie that will be used as the hash key for the consistent + hash load balancer. If the cookie is not present, it will be + generated. This field is applicable if the sessionAffinity + is set to HTTP_COOKIE. + + This field is a member of `oneof`_ ``_http_cookie``. + http_header_name (str): + The hash based on the value of the specified header field. 
+ This field is applicable if the sessionAffinity is set to + HEADER_FIELD. + + This field is a member of `oneof`_ ``_http_header_name``. + minimum_ring_size (int): + The minimum number of virtual nodes to use + for the hash ring. Defaults to 1024. Larger ring + sizes result in more granular load + distributions. If the number of hosts in the + load balancing pool is larger than the ring + size, each host will be assigned a single + virtual node. + + This field is a member of `oneof`_ ``_minimum_ring_size``. + """ + + http_cookie = proto.Field( + proto.MESSAGE, + number=6673915, + optional=True, + message='ConsistentHashLoadBalancerSettingsHttpCookie', + ) + http_header_name = proto.Field( + proto.STRING, + number=234798022, + optional=True, + ) + minimum_ring_size = proto.Field( + proto.INT64, + number=234380735, + optional=True, + ) + + +class ConsistentHashLoadBalancerSettingsHttpCookie(proto.Message): + r"""The information about the HTTP Cookie on which the hash + function is based for load balancing policies that use a + consistent hash. + + Attributes: + name (str): + Name of the cookie. + + This field is a member of `oneof`_ ``_name``. + path (str): + Path to set for the cookie. + + This field is a member of `oneof`_ ``_path``. + ttl (google.cloud.compute_v1.types.Duration): + Lifetime of the cookie. + + This field is a member of `oneof`_ ``_ttl``. + """ + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + path = proto.Field( + proto.STRING, + number=3433509, + optional=True, + ) + ttl = proto.Field( + proto.MESSAGE, + number=115180, + optional=True, + message='Duration', + ) + + +class CorsPolicy(proto.Message): + r"""The specification for allowing client side cross-origin + requests. Please see W3C Recommendation for Cross Origin + Resource Sharing + + Attributes: + allow_credentials (bool): + In response to a preflight request, setting + this to true indicates that the actual request + can include user credentials. 
This translates to
+            the Access-Control-Allow-Credentials header.
+            Default is false.
+
+            This field is a member of `oneof`_ ``_allow_credentials``.
+        allow_headers (Sequence[str]):
+            Specifies the content for the
+            Access-Control-Allow-Headers header.
+        allow_methods (Sequence[str]):
+            Specifies the content for the
+            Access-Control-Allow-Methods header.
+        allow_origin_regexes (Sequence[str]):
+            Specifies the regular expression patterns
+            that match allowed origins. For regular
+            expression grammar please see
+            github.com/google/re2/wiki/Syntax An origin is
+            allowed if it matches either an item in
+            allowOrigins or an item in allowOriginRegexes.
+        allow_origins (Sequence[str]):
+            Specifies the list of origins that will be
+            allowed to do CORS requests. An origin is
+            allowed if it matches either an item in
+            allowOrigins or an item in allowOriginRegexes.
+        disabled (bool):
+            If true, specifies the CORS policy is
+            disabled. The default value is false, which
+            indicates that the CORS policy is in effect.
+
+            This field is a member of `oneof`_ ``_disabled``.
+        expose_headers (Sequence[str]):
+            Specifies the content for the
+            Access-Control-Expose-Headers header.
+        max_age (int):
+            Specifies how long results of a preflight
+            request can be cached in seconds. This
+            translates to the Access-Control-Max-Age header.
+
+            This field is a member of `oneof`_ ``_max_age``.
+ """ + + allow_credentials = proto.Field( + proto.BOOL, + number=481263366, + optional=True, + ) + allow_headers = proto.RepeatedField( + proto.STRING, + number=45179024, + ) + allow_methods = proto.RepeatedField( + proto.STRING, + number=205405372, + ) + allow_origin_regexes = proto.RepeatedField( + proto.STRING, + number=215385810, + ) + allow_origins = proto.RepeatedField( + proto.STRING, + number=194914071, + ) + disabled = proto.Field( + proto.BOOL, + number=270940796, + optional=True, + ) + expose_headers = proto.RepeatedField( + proto.STRING, + number=247604747, + ) + max_age = proto.Field( + proto.INT32, + number=307559332, + optional=True, + ) + + +class CreateInstancesInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.CreateInstances. + See the method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + instance_group_managers_create_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersCreateInstancesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. The request ID must be a valid UUID + with the exception that zero UUID is not + supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the managed + instance group is located. It should conform to + RFC1035. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_create_instances_request_resource = proto.Field( + proto.MESSAGE, + number=24558867, + message='InstanceGroupManagersCreateInstancesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class CreateInstancesRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.CreateInstances. See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + project (str): + Project ID for this request. + region (str): + The name of the region where the managed + instance group is located. It should conform to + RFC1035. + region_instance_group_managers_create_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersCreateInstancesRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. The request ID must be a valid UUID + with the exception that zero UUID is not + supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_managers_create_instances_request_resource = proto.Field( + proto.MESSAGE, + number=359014280, + message='RegionInstanceGroupManagersCreateInstancesRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class CreateSnapshotDiskRequest(proto.Message): + r"""A request message for Disks.CreateSnapshot. See the method + description for details. + + Attributes: + disk (str): + Name of the persistent disk to snapshot. + guest_flush (bool): + [Input Only] Whether to attempt an application consistent + snapshot by informing the OS to prepare for the snapshot + process. Currently only supported on Windows instances using + the Volume Shadow Copy Service (VSS). + + This field is a member of `oneof`_ ``_guest_flush``. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + snapshot_resource (google.cloud.compute_v1.types.Snapshot): + The body resource for this request + zone (str): + The name of the zone for this request. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + guest_flush = proto.Field( + proto.BOOL, + number=385550813, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + snapshot_resource = proto.Field( + proto.MESSAGE, + number=481319977, + message='Snapshot', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class CreateSnapshotRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.CreateSnapshot. See the + method description for details. + + Attributes: + disk (str): + Name of the regional persistent disk to + snapshot. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+        snapshot_resource (google.cloud.compute_v1.types.Snapshot):
+            The body resource for this request
+    """
+
+    disk = proto.Field(
+        proto.STRING,
+        number=3083677,
+    )
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    region = proto.Field(
+        proto.STRING,
+        number=138946292,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    snapshot_resource = proto.Field(
+        proto.MESSAGE,
+        number=481319977,
+        message='Snapshot',
+    )
+
+
+class CustomerEncryptionKey(proto.Message):
+    r"""
+
+    Attributes:
+        kms_key_name (str):
+            The name of the encryption key that is stored in Google
+            Cloud KMS. For example: "kmsKeyName":
+            "projects/kms_project_id/locations/region/keyRings/
+            key_region/cryptoKeys/key"
+
+            This field is a member of `oneof`_ ``_kms_key_name``.
+        kms_key_service_account (str):
+            The service account being used for the encryption request
+            for the given KMS key. If absent, the Compute Engine default
+            service account is used. For example:
+            "kmsKeyServiceAccount":
+            "name@project_id.iam.gserviceaccount.com"
+
+            This field is a member of `oneof`_ ``_kms_key_service_account``.
+        raw_key (str):
+            Specifies a 256-bit customer-supplied
+            encryption key, encoded in RFC 4648 base64 to
+            either encrypt or decrypt this resource. You can
+            provide either the rawKey or the
+            rsaEncryptedKey. For example: "rawKey":
+            "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=".
+
+            This field is a member of `oneof`_ ``_raw_key``.
+        rsa_encrypted_key (str):
+            Specifies an RFC 4648 base64 encoded, RSA-
+            wrapped 2048-bit customer-supplied encryption key
+            to either encrypt or decrypt this resource. You
+            can provide either the rawKey or the
+            rsaEncryptedKey. 
For example: "rsaEncryptedKey": + "ieCx/NcW06PcT7Ep1X6LUTc/hLvUDYyzSZPPVCVPTVEohpeHASqC8uw5TzyO9U+Fka9JFH + z0mBibXUInrC/jEk014kCK/NPjYgEMOyssZ4ZINPKxlUh2zn1bV+MCaTICrdmuSBTWlUUiFoD + D6PYznLwh8ZNdaheCeZ8ewEXgFQ8V+sDroLaN3Xs3MDTXQEMMoNUXMCZEIpg9Vtp9x2oe==" + The key must meet the following requirements + before you can provide it to Compute Engine: 1. + The key is wrapped using a RSA public key + certificate provided by Google. 2. After being + wrapped, the key must be encoded in RFC 4648 + base64 encoding. Gets the RSA public key + certificate provided by Google at: + https://cloud- + certs.storage.googleapis.com/google-cloud-csek- + ingress.pem + + This field is a member of `oneof`_ ``_rsa_encrypted_key``. + sha256 (str): + [Output only] The RFC 4648 base64 encoded SHA-256 hash of + the customer-supplied encryption key that protects this + resource. + + This field is a member of `oneof`_ ``_sha256``. + """ + + kms_key_name = proto.Field( + proto.STRING, + number=484373913, + optional=True, + ) + kms_key_service_account = proto.Field( + proto.STRING, + number=209986261, + optional=True, + ) + raw_key = proto.Field( + proto.STRING, + number=449196488, + optional=True, + ) + rsa_encrypted_key = proto.Field( + proto.STRING, + number=335487397, + optional=True, + ) + sha256 = proto.Field( + proto.STRING, + number=170112551, + optional=True, + ) + + +class CustomerEncryptionKeyProtectedDisk(proto.Message): + r""" + + Attributes: + disk_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + Decrypts data associated with the disk with a + customer-supplied encryption key. + + This field is a member of `oneof`_ ``_disk_encryption_key``. + source (str): + Specifies a valid partial or full URL to an existing + Persistent Disk resource. This field is only applicable for + persistent disks. For example: "source": + "/compute/v1/projects/project_id/zones/zone/disks/ disk_name + + This field is a member of `oneof`_ ``_source``. 
+ """ + + disk_encryption_key = proto.Field( + proto.MESSAGE, + number=271660677, + optional=True, + message='CustomerEncryptionKey', + ) + source = proto.Field( + proto.STRING, + number=177235995, + optional=True, + ) + + +class Data(proto.Message): + r""" + + Attributes: + key (str): + [Output Only] A key that provides more detail on the warning + being returned. For example, for warnings where there are no + results in a list request for a particular zone, this key + might be scope and the key value might be the zone name. + Other examples might be a key indicating a deprecated + resource and a suggested replacement, or a warning about + invalid network settings (for example, if an instance + attempts to perform IP forwarding but is not enabled for IP + forwarding). + + This field is a member of `oneof`_ ``_key``. + value (str): + [Output Only] A warning data value corresponding to the key. + + This field is a member of `oneof`_ ``_value``. + """ + + key = proto.Field( + proto.STRING, + number=106079, + optional=True, + ) + value = proto.Field( + proto.STRING, + number=111972721, + optional=True, + ) + + +class DeleteAccessConfigInstanceRequest(proto.Message): + r"""A request message for Instances.DeleteAccessConfig. See the + method description for details. + + Attributes: + access_config (str): + The name of the access config to delete. + instance (str): + The instance name for this request. + network_interface (str): + The name of the network interface. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + access_config = proto.Field( + proto.STRING, + number=72856189, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + network_interface = proto.Field( + proto.STRING, + number=365387880, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteAddressRequest(proto.Message): + r"""A request message for Addresses.Delete. See the method + description for details. + + Attributes: + address (str): + Name of the address resource to delete. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + """ + + address = proto.Field( + proto.STRING, + number=462920692, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteAutoscalerRequest(proto.Message): + r"""A request message for Autoscalers.Delete. See the method + description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + Name of the zone for this request. + """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteBackendBucketRequest(proto.Message): + r"""A request message for BackendBuckets.Delete. See the method + description for details. + + Attributes: + backend_bucket (str): + Name of the BackendBucket resource to delete. + project (str): + Project ID for this request. 
+ request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_bucket = proto.Field( + proto.STRING, + number=91714037, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.Delete. See the method + description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to + delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteDiskRequest(proto.Message): + r"""A request message for Disks.Delete. See the method + description for details. + + Attributes: + disk (str): + Name of the persistent disk to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteExternalVpnGatewayRequest(proto.Message): + r"""A request message for ExternalVpnGateways.Delete. See the + method description for details. 
+ + Attributes: + external_vpn_gateway (str): + Name of the externalVpnGateways to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + external_vpn_gateway = proto.Field( + proto.STRING, + number=109898629, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.Delete. See the method + description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to delete. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteFirewallRequest(proto.Message): + r"""A request message for Firewalls.Delete. See the method + description for details. + + Attributes: + firewall (str): + Name of the firewall rule to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall = proto.Field( + proto.STRING, + number=511016192, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteForwardingRuleRequest(proto.Message): + r"""A request message for ForwardingRules.Delete. See the method + description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource to + delete. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. 
+ request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteGlobalAddressRequest(proto.Message): + r"""A request message for GlobalAddresses.Delete. See the method + description for details. + + Attributes: + address (str): + Name of the address resource to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + address = proto.Field( + proto.STRING, + number=462920692, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteGlobalForwardingRuleRequest(proto.Message): + r"""A request message for GlobalForwardingRules.Delete. See the + method description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource to + delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteGlobalNetworkEndpointGroupRequest(proto.Message): + r"""A request message for GlobalNetworkEndpointGroups.Delete. See + the method description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group to + delete. 
It should comply with RFC1035. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteGlobalOperationRequest(proto.Message): + r"""A request message for GlobalOperations.Delete. See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to delete. + project (str): + Project ID for this request. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class DeleteGlobalOperationResponse(proto.Message): + r"""A response message for GlobalOperations.Delete. See the + method description for details. + + """ + + +class DeleteGlobalOrganizationOperationRequest(proto.Message): + r"""A request message for GlobalOrganizationOperations.Delete. + See the method description for details. + + Attributes: + operation (str): + Name of the Operations resource to delete. + parent_id (str): + Parent ID for this request. 
+ + This field is a member of `oneof`_ ``_parent_id``. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + parent_id = proto.Field( + proto.STRING, + number=459714768, + optional=True, + ) + + +class DeleteGlobalOrganizationOperationResponse(proto.Message): + r"""A response message for GlobalOrganizationOperations.Delete. + See the method description for details. + + """ + + +class DeleteGlobalPublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for GlobalPublicDelegatedPrefixes.Delete. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix resource to + delete. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix = proto.Field( + proto.STRING, + number=204238440, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteHealthCheckRequest(proto.Message): + r"""A request message for HealthChecks.Delete. See the method + description for details. 
+ + Attributes: + health_check (str): + Name of the HealthCheck resource to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteImageRequest(proto.Message): + r"""A request message for Images.Delete. See the method + description for details. + + Attributes: + image (str): + Name of the image resource to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + image = proto.Field( + proto.STRING, + number=100313435, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.Delete. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group to + delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the managed + instance group is located. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteInstanceGroupRequest(proto.Message): + r"""A request message for InstanceGroups.Delete. 
See the method + description for details. + + Attributes: + instance_group (str): + The name of the instance group to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the instance group + is located. + """ + + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteInstanceRequest(proto.Message): + r"""A request message for Instances.Delete. See the method + description for details. + + Attributes: + instance (str): + Name of the instance resource to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteInstanceTemplateRequest(proto.Message): + r"""A request message for InstanceTemplates.Delete. See the + method description for details. + + Attributes: + instance_template (str): + The name of the instance template to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_template = proto.Field( + proto.STRING, + number=309248228, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteInstancesInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.DeleteInstances. + See the method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. + instance_group_managers_delete_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersDeleteInstancesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the managed + instance group is located. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_delete_instances_request_resource = proto.Field( + proto.MESSAGE, + number=166421252, + message='InstanceGroupManagersDeleteInstancesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteInstancesRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.DeleteInstances. See the method + description for details. + + Attributes: + instance_group_manager (str): + Name of the managed instance group. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + region_instance_group_managers_delete_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersDeleteInstancesRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_managers_delete_instances_request_resource = proto.Field( + proto.MESSAGE, + number=500876665, + message='RegionInstanceGroupManagersDeleteInstancesRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteInterconnectAttachmentRequest(proto.Message): + r"""A request message for InterconnectAttachments.Delete. See the + method description for details. + + Attributes: + interconnect_attachment (str): + Name of the interconnect attachment to + delete. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + interconnect_attachment = proto.Field( + proto.STRING, + number=308135284, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteInterconnectRequest(proto.Message): + r"""A request message for Interconnects.Delete. See the method + description for details. + + Attributes: + interconnect (str): + Name of the interconnect to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + interconnect = proto.Field( + proto.STRING, + number=224601230, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteLicenseRequest(proto.Message): + r"""A request message for Licenses.Delete. See the method + description for details. + + Attributes: + license_ (str): + Name of the license resource to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + license_ = proto.Field( + proto.STRING, + number=166757441, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteNetworkEndpointGroupRequest(proto.Message): + r"""A request message for NetworkEndpointGroups.Delete. See the + method description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group to + delete. It should comply with RFC1035. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+
+            This field is a member of `oneof`_ ``_request_id``.
+        zone (str):
+            The name of the zone where the network
+            endpoint group is located. It should comply with
+            RFC1035.
+    """
+
+    # NOTE(review): generated GAPIC code (gapic-generator-python). The
+    # ``number=`` arguments are the protobuf field tags — presumably derived
+    # from the field names by the generator; they are part of the wire
+    # contract with the Compute API and must never be renumbered by hand.
+    network_endpoint_group = proto.Field(
+        proto.STRING,
+        number=433907078,
+    )
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    # proto3 optional: field presence is tracked via the ``_request_id`` oneof.
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    zone = proto.Field(
+        proto.STRING,
+        number=3744684,
+    )
+
+
+class DeleteNetworkRequest(proto.Message):
+    r"""A request message for Networks.Delete. See the method
+    description for details.
+
+    Attributes:
+        network (str):
+            Name of the network to delete.
+        project (str):
+            Project ID for this request.
+        request_id (str):
+            An optional request ID to identify requests.
+            Specify a unique request ID so that if you must
+            retry your request, the server will know to
+            ignore the request if it has already been
+            completed. For example, consider a situation
+            where you make an initial request and the
+            request times out. If you make the request again
+            with the same request ID, the server can check
+            if original operation with the same request ID
+            was received, and if so, will ignore the second
+            request. This prevents clients from accidentally
+            creating duplicate commitments. The request ID
+            must be a valid UUID with the exception that
+            zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+    """
+
+    # Field tags are generated wire-contract values — do not renumber.
+    network = proto.Field(
+        proto.STRING,
+        number=232872494,
+    )
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    # proto3 optional: field presence is tracked via the ``_request_id`` oneof.
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+
+
+class DeleteNodeGroupRequest(proto.Message):
+    r"""A request message for NodeGroups.Delete. See the method
+    description for details.
+
+    Attributes:
+        node_group (str):
+            Name of the NodeGroup resource to delete.
+        project (str):
+            Project ID for this request.
+ request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + node_group = proto.Field( + proto.STRING, + number=469958146, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteNodeTemplateRequest(proto.Message): + r"""A request message for NodeTemplates.Delete. See the method + description for details. + + Attributes: + node_template (str): + Name of the NodeTemplate resource to delete. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + node_template = proto.Field( + proto.STRING, + number=323154455, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteNodesNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.DeleteNodes. See the method + description for details. + + Attributes: + node_group (str): + Name of the NodeGroup resource whose nodes + will be deleted. + node_groups_delete_nodes_request_resource (google.cloud.compute_v1.types.NodeGroupsDeleteNodesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + node_group = proto.Field( + proto.STRING, + number=469958146, + ) + node_groups_delete_nodes_request_resource = proto.Field( + proto.MESSAGE, + number=183298962, + message='NodeGroupsDeleteNodesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeletePacketMirroringRequest(proto.Message): + r"""A request message for PacketMirrorings.Delete. See the method + description for details. + + Attributes: + packet_mirroring (str): + Name of the PacketMirroring resource to + delete. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + packet_mirroring = proto.Field( + proto.STRING, + number=22305996, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeletePerInstanceConfigsInstanceGroupManagerRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.DeletePerInstanceConfigs. 
See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + instance_group_managers_delete_per_instance_configs_req_resource (google.cloud.compute_v1.types.InstanceGroupManagersDeletePerInstanceConfigsReq): + The body resource for this request + project (str): + Project ID for this request. + zone (str): + The name of the zone where the managed + instance group is located. It should conform to + RFC1035. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_delete_per_instance_configs_req_resource = proto.Field( + proto.MESSAGE, + number=362427680, + message='InstanceGroupManagersDeletePerInstanceConfigsReq', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeletePerInstanceConfigsRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.DeletePerInstanceConfigs. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request, + should conform to RFC1035. 
+ region_instance_group_manager_delete_instance_config_req_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagerDeleteInstanceConfigReq): + The body resource for this request + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_manager_delete_instance_config_req_resource = proto.Field( + proto.MESSAGE, + number=740741, + message='RegionInstanceGroupManagerDeleteInstanceConfigReq', + ) + + +class DeletePublicAdvertisedPrefixeRequest(proto.Message): + r"""A request message for PublicAdvertisedPrefixes.Delete. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + public_advertised_prefix (str): + Name of the PublicAdvertisedPrefix resource + to delete. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_advertised_prefix = proto.Field( + proto.STRING, + number=101874590, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeletePublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for PublicDelegatedPrefixes.Delete. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix resource to + delete. + region (str): + Name of the region of this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix = proto.Field( + proto.STRING, + number=204238440, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionAutoscalerRequest(proto.Message): + r"""A request message for RegionAutoscalers.Delete. See the + method description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to delete. + project (str): + Project ID for this request. 
+ region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionBackendServiceRequest(proto.Message): + r"""A request message for RegionBackendServices.Delete. See the + method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to + delete. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.Delete. See the method + description for details. + + Attributes: + disk (str): + Name of the regional persistent disk to + delete. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionHealthCheckRequest(proto.Message): + r"""A request message for RegionHealthChecks.Delete. See the + method description for details. + + Attributes: + health_check (str): + Name of the HealthCheck resource to delete. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionHealthCheckServiceRequest(proto.Message): + r"""A request message for RegionHealthCheckServices.Delete. See + the method description for details. + + Attributes: + health_check_service (str): + Name of the HealthCheckService to delete. 
The + name must be 1-63 characters long, and comply + with RFC1035. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + health_check_service = proto.Field( + proto.STRING, + number=408374747, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for RegionInstanceGroupManagers.Delete. See + the method description for details. + + Attributes: + instance_group_manager (str): + Name of the managed instance group to delete. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionNetworkEndpointGroupRequest(proto.Message): + r"""A request message for RegionNetworkEndpointGroups.Delete. See + the method description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group to + delete. It should comply with RFC1035. + project (str): + Project ID for this request. + region (str): + The name of the region where the network + endpoint group is located. It should comply with + RFC1035. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + """ + + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionNotificationEndpointRequest(proto.Message): + r"""A request message for RegionNotificationEndpoints.Delete. See + the method description for details. + + Attributes: + notification_endpoint (str): + Name of the NotificationEndpoint resource to + delete. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + """ + + notification_endpoint = proto.Field( + proto.STRING, + number=376807017, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteRegionOperationRequest(proto.Message): + r"""A request message for RegionOperations.Delete. 
See the method
+    description for details.
+
+    Attributes:
+        operation (str):
+            Name of the Operations resource to delete.
+        project (str):
+            Project ID for this request.
+        region (str):
+            Name of the region for this request.
+    """
+
+    # NOTE(review): generated GAPIC code — the ``number=`` values are the
+    # protobuf field tags (wire contract with the Compute API); keep them in
+    # sync with the protos and never renumber by hand.
+    operation = proto.Field(
+        proto.STRING,
+        number=52090215,
+    )
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    region = proto.Field(
+        proto.STRING,
+        number=138946292,
+    )
+
+
+class DeleteRegionOperationResponse(proto.Message):
+    r"""A response message for RegionOperations.Delete. See the
+    method description for details.
+
+    This message declares no fields: a successful
+    RegionOperations.Delete call returns an empty body.
+    """
+
+
+class DeleteRegionSslCertificateRequest(proto.Message):
+    r"""A request message for RegionSslCertificates.Delete. See the
+    method description for details.
+
+    Attributes:
+        project (str):
+            Project ID for this request.
+        region (str):
+            Name of the region scoping this request.
+        request_id (str):
+            An optional request ID to identify requests. Specify a
+            unique request ID so that if you must retry your request,
+            the server will know to ignore the request if it has already
+            been completed. For example, consider a situation where you
+            make an initial request and the request times out. If you
+            make the request again with the same request ID, the server
+            can check if original operation with the same request ID was
+            received, and if so, will ignore the second request. This
+            prevents clients from accidentally creating duplicate
+            commitments. The request ID must be a valid UUID with the
+            exception that zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000). end_interface:
+            MixerMutationRequestBuilder
+
+            This field is a member of `oneof`_ ``_request_id``.
+        ssl_certificate (str):
+            Name of the SslCertificate resource to
+            delete.
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + ssl_certificate = proto.Field( + proto.STRING, + number=46443492, + ) + + +class DeleteRegionTargetHttpProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpProxies.Delete. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_http_proxy (str): + Name of the TargetHttpProxy resource to + delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_http_proxy = proto.Field( + proto.STRING, + number=206872421, + ) + + +class DeleteRegionTargetHttpsProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpsProxies.Delete. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. 
+ region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxy (str): + Name of the TargetHttpsProxy resource to + delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class DeleteRegionUrlMapRequest(proto.Message): + r"""A request message for RegionUrlMaps.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + begin_interface: MixerMutationRequestBuilder Request ID to + support idempotency. + + This field is a member of `oneof`_ ``_request_id``. + url_map (str): + Name of the UrlMap resource to delete. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + + +class DeleteReservationRequest(proto.Message): + r"""A request message for Reservations.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + reservation (str): + Name of the reservation to delete. + zone (str): + Name of the zone for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + reservation = proto.Field( + proto.STRING, + number=47530956, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource_policy (str): + Name of the resource policy to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource_policy = proto.Field( + proto.STRING, + number=159240835, + ) + + +class DeleteRouteRequest(proto.Message): + r"""A request message for Routes.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + route (str): + Name of the Route resource to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + route = proto.Field( + proto.STRING, + number=108704329, + ) + + +class DeleteRouterRequest(proto.Message): + r"""A request message for Routers.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + router (str): + Name of the Router resource to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + router = proto.Field( + proto.STRING, + number=148608841, + ) + + +class DeleteSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. 
+            Specify a unique request ID so that if you must
+            retry your request, the server will know to
+            ignore the request if it has already been
+            completed. For example, consider a situation
+            where you make an initial request and the
+            request times out. If you make the request again
+            with the same request ID, the server can check
+            if original operation with the same request ID
+            was received, and if so, will ignore the second
+            request. This prevents clients from accidentally
+            creating duplicate commitments. The request ID
+            must be a valid UUID with the exception that
+            zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+        security_policy (str):
+            Name of the security policy to delete.
+    """
+
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    security_policy = proto.Field(
+        proto.STRING,
+        number=171082513,
+    )
+
+
+class DeleteServiceAttachmentRequest(proto.Message):
+    r"""A request message for ServiceAttachments.Delete. See the
+    method description for details.
+
+    Attributes:
+        project (str):
+            Project ID for this request.
+        region (str):
+            Name of the region of this request.
+        request_id (str):
+            An optional request ID to identify requests. Specify a
+            unique request ID so that if you must retry your request,
+            the server will know to ignore the request if it has already
+            been completed. For example, consider a situation where you
+            make an initial request and the request times out. If you
+            make the request again with the same request ID, the server
+            can check if original operation with the same request ID was
+            received, and if so, will ignore the second request. This
+            prevents clients from accidentally creating duplicate
+            commitments. The request ID must be a valid UUID with the
+            exception that zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+        service_attachment (str):
+            Name of the ServiceAttachment resource to
+            delete.
+    """
+
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    region = proto.Field(
+        proto.STRING,
+        number=138946292,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    service_attachment = proto.Field(
+        proto.STRING,
+        number=338957549,
+    )
+
+
+class DeleteSignedUrlKeyBackendBucketRequest(proto.Message):
+    r"""A request message for BackendBuckets.DeleteSignedUrlKey. See
+    the method description for details.
+
+    Attributes:
+        backend_bucket (str):
+            Name of the BackendBucket resource to which
+            the Signed URL Key should be added. The name
+            should conform to RFC1035.
+        key_name (str):
+            The name of the Signed URL Key to delete.
+        project (str):
+            Project ID for this request.
+        request_id (str):
+            An optional request ID to identify requests.
+            Specify a unique request ID so that if you must
+            retry your request, the server will know to
+            ignore the request if it has already been
+            completed. For example, consider a situation
+            where you make an initial request and the
+            request times out. If you make the request again
+            with the same request ID, the server can check
+            if original operation with the same request ID
+            was received, and if so, will ignore the second
+            request. This prevents clients from accidentally
+            creating duplicate commitments. The request ID
+            must be a valid UUID with the exception that
+            zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+ """ + + backend_bucket = proto.Field( + proto.STRING, + number=91714037, + ) + key_name = proto.Field( + proto.STRING, + number=500938859, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteSignedUrlKeyBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.DeleteSignedUrlKey. See + the method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to which + the Signed URL Key should be added. The name + should conform to RFC1035. + key_name (str): + The name of the Signed URL Key to delete. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + key_name = proto.Field( + proto.STRING, + number=500938859, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeleteSnapshotRequest(proto.Message): + r"""A request message for Snapshots.Delete. See the method + description for details. 
+ + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + snapshot (str): + Name of the Snapshot resource to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + snapshot = proto.Field( + proto.STRING, + number=284874180, + ) + + +class DeleteSslCertificateRequest(proto.Message): + r"""A request message for SslCertificates.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. 
The request ID must be a valid UUID with the
+            exception that zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+        ssl_certificate (str):
+            Name of the SslCertificate resource to
+            delete.
+    """
+
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    ssl_certificate = proto.Field(
+        proto.STRING,
+        number=46443492,
+    )
+
+
+class DeleteSslPolicyRequest(proto.Message):
+    r"""A request message for SslPolicies.Delete. See the method
+    description for details.
+
+    Attributes:
+        project (str):
+            Project ID for this request.
+        request_id (str):
+            An optional request ID to identify requests.
+            Specify a unique request ID so that if you must
+            retry your request, the server will know to
+            ignore the request if it has already been
+            completed. For example, consider a situation
+            where you make an initial request and the
+            request times out. If you make the request again
+            with the same request ID, the server can check
+            if original operation with the same request ID
+            was received, and if so, will ignore the second
+            request. This prevents clients from accidentally
+            creating duplicate commitments. The request ID
+            must be a valid UUID with the exception that
+            zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+        ssl_policy (str):
+            Name of the SSL policy to delete. The name
+            must be 1-63 characters long, and comply with
+            RFC1035.
+    """
+
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    ssl_policy = proto.Field(
+        proto.STRING,
+        number=295190213,
+    )
+
+
+class DeleteSubnetworkRequest(proto.Message):
+    r"""A request message for Subnetworks.Delete. 
See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + subnetwork (str): + Name of the Subnetwork resource to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + ) + + +class DeleteTargetGrpcProxyRequest(proto.Message): + r"""A request message for TargetGrpcProxies.Delete. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. 
If you
+            make the request again with the same request ID, the server
+            can check if original operation with the same request ID was
+            received, and if so, will ignore the second request. This
+            prevents clients from accidentally creating duplicate
+            commitments. The request ID must be a valid UUID with the
+            exception that zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+        target_grpc_proxy (str):
+            Name of the TargetGrpcProxy resource to
+            delete.
+    """
+
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    target_grpc_proxy = proto.Field(
+        proto.STRING,
+        number=5020283,
+    )
+
+
+class DeleteTargetHttpProxyRequest(proto.Message):
+    r"""A request message for TargetHttpProxies.Delete. See the
+    method description for details.
+
+    Attributes:
+        project (str):
+            Project ID for this request.
+        request_id (str):
+            An optional request ID to identify requests.
+            Specify a unique request ID so that if you must
+            retry your request, the server will know to
+            ignore the request if it has already been
+            completed. For example, consider a situation
+            where you make an initial request and the
+            request times out. If you make the request again
+            with the same request ID, the server can check
+            if original operation with the same request ID
+            was received, and if so, will ignore the second
+            request. This prevents clients from accidentally
+            creating duplicate commitments. The request ID
+            must be a valid UUID with the exception that
+            zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+        target_http_proxy (str):
+            Name of the TargetHttpProxy resource to
+            delete.
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_http_proxy = proto.Field( + proto.STRING, + number=206872421, + ) + + +class DeleteTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.Delete. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxy (str): + Name of the TargetHttpsProxy resource to + delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class DeleteTargetInstanceRequest(proto.Message): + r"""A request message for TargetInstances.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_instance (str): + Name of the TargetInstance resource to + delete. + zone (str): + Name of the zone scoping this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_instance = proto.Field( + proto.STRING, + number=289769347, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + target_pool (str): + Name of the TargetPool resource to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + + +class DeleteTargetSslProxyRequest(proto.Message): + r"""A request message for TargetSslProxies.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_ssl_proxy (str): + Name of the TargetSslProxy resource to + delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_ssl_proxy = proto.Field( + proto.STRING, + number=338795853, + ) + + +class DeleteTargetTcpProxyRequest(proto.Message): + r"""A request message for TargetTcpProxies.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_tcp_proxy (str): + Name of the TargetTcpProxy resource to + delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_tcp_proxy = proto.Field( + proto.STRING, + number=503065442, + ) + + +class DeleteTargetVpnGatewayRequest(proto.Message): + r"""A request message for TargetVpnGateways.Delete. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_vpn_gateway (str): + Name of the target VPN gateway to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_vpn_gateway = proto.Field( + proto.STRING, + number=532512843, + ) + + +class DeleteUrlMapRequest(proto.Message): + r"""A request message for UrlMaps.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + url_map (str): + Name of the UrlMap resource to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + + +class DeleteVpnGatewayRequest(proto.Message): + r"""A request message for VpnGateways.Delete. See the method + description for details. 
+ + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + vpn_gateway (str): + Name of the VPN gateway to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + vpn_gateway = proto.Field( + proto.STRING, + number=406684153, + ) + + +class DeleteVpnTunnelRequest(proto.Message): + r"""A request message for VpnTunnels.Delete. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + vpn_tunnel (str): + Name of the VpnTunnel resource to delete. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + vpn_tunnel = proto.Field( + proto.STRING, + number=143821331, + ) + + +class DeleteZoneOperationRequest(proto.Message): + r"""A request message for ZoneOperations.Delete. See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to delete. + project (str): + Project ID for this request. + zone (str): + Name of the zone for this request. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DeleteZoneOperationResponse(proto.Message): + r"""A response message for ZoneOperations.Delete. See the method + description for details. + + """ + + +class Denied(proto.Message): + r""" + + Attributes: + I_p_protocol (str): + The IP protocol to which this rule applies. + The protocol type is required when creating a + firewall rule. This value can either be one of + the following well known protocol strings (tcp, + udp, icmp, esp, ah, ipip, sctp) or the IP + protocol number. + + This field is a member of `oneof`_ ``_I_p_protocol``. + ports (Sequence[str]): + An optional list of ports to which this rule applies. 
This + field is only applicable for the UDP or TCP protocol. Each + entry must be either an integer or a range. If not + specified, this rule applies to connections through any + port. Example inputs include: ["22"], ["80","443"], and + ["12345-12349"]. + """ + + I_p_protocol = proto.Field( + proto.STRING, + number=488094525, + optional=True, + ) + ports = proto.RepeatedField( + proto.STRING, + number=106854418, + ) + + +class DeprecateImageRequest(proto.Message): + r"""A request message for Images.Deprecate. See the method + description for details. + + Attributes: + deprecation_status_resource (google.cloud.compute_v1.types.DeprecationStatus): + The body resource for this request + image (str): + Image name. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + deprecation_status_resource = proto.Field( + proto.MESSAGE, + number=333006064, + message='DeprecationStatus', + ) + image = proto.Field( + proto.STRING, + number=100313435, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DeprecationStatus(proto.Message): + r"""Deprecation status for a public resource. 
+ + Attributes: + deleted (str): + An optional RFC3339 timestamp on or after + which the state of this resource is intended to + change to DELETED. This is only informational + and the status will not change unless the client + explicitly changes it. + + This field is a member of `oneof`_ ``_deleted``. + deprecated (str): + An optional RFC3339 timestamp on or after + which the state of this resource is intended to + change to DEPRECATED. This is only informational + and the status will not change unless the client + explicitly changes it. + + This field is a member of `oneof`_ ``_deprecated``. + obsolete (str): + An optional RFC3339 timestamp on or after + which the state of this resource is intended to + change to OBSOLETE. This is only informational + and the status will not change unless the client + explicitly changes it. + + This field is a member of `oneof`_ ``_obsolete``. + replacement (str): + The URL of the suggested replacement for a + deprecated resource. The suggested replacement + resource must be the same kind of resource as + the deprecated resource. + + This field is a member of `oneof`_ ``_replacement``. + state (str): + The deprecation state of this resource. This + can be ACTIVE, DEPRECATED, OBSOLETE, or DELETED. + Operations which communicate the end of life + date for an image, can use ACTIVE. Operations + which create a new resource using a DEPRECATED + resource will return successfully, but with a + warning indicating the deprecated resource and + recommending its replacement. Operations which + use OBSOLETE or DELETED resources will be + rejected and result in an error. Check the State + enum for the list of possible values. + + This field is a member of `oneof`_ ``_state``. + """ + class State(proto.Enum): + r"""The deprecation state of this resource. This can be ACTIVE, + DEPRECATED, OBSOLETE, or DELETED. Operations which communicate + the end of life date for an image, can use ACTIVE. 
Operations + which create a new resource using a DEPRECATED resource will + return successfully, but with a warning indicating the + deprecated resource and recommending its replacement. Operations + which use OBSOLETE or DELETED resources will be rejected and + result in an error. + """ + UNDEFINED_STATE = 0 + ACTIVE = 314733318 + DELETED = 120962041 + DEPRECATED = 463360435 + OBSOLETE = 66532761 + + deleted = proto.Field( + proto.STRING, + number=476721177, + optional=True, + ) + deprecated = proto.Field( + proto.STRING, + number=515138995, + optional=True, + ) + obsolete = proto.Field( + proto.STRING, + number=357647769, + optional=True, + ) + replacement = proto.Field( + proto.STRING, + number=430919186, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + + +class DetachDiskInstanceRequest(proto.Message): + r"""A request message for Instances.DetachDisk. See the method + description for details. + + Attributes: + device_name (str): + The device name of the disk to detach. Make a + get() request on the instance to view currently + attached disks and device names. + instance (str): + Instance name for this request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + device_name = proto.Field( + proto.STRING, + number=67541716, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest(proto.Message): + r"""A request message for + GlobalNetworkEndpointGroups.DetachNetworkEndpoints. See the + method description for details. + + Attributes: + global_network_endpoint_groups_detach_endpoints_request_resource (google.cloud.compute_v1.types.GlobalNetworkEndpointGroupsDetachEndpointsRequest): + The body resource for this request + network_endpoint_group (str): + The name of the network endpoint group where + you are removing network endpoints. It should + comply with RFC1035. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + global_network_endpoint_groups_detach_endpoints_request_resource = proto.Field( + proto.MESSAGE, + number=8898269, + message='GlobalNetworkEndpointGroupsDetachEndpointsRequest', + ) + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DetachNetworkEndpointsNetworkEndpointGroupRequest(proto.Message): + r"""A request message for + NetworkEndpointGroups.DetachNetworkEndpoints. See the method + description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group where + you are removing network endpoints. It should + comply with RFC1035. + network_endpoint_groups_detach_endpoints_request_resource (google.cloud.compute_v1.types.NetworkEndpointGroupsDetachEndpointsRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the network + endpoint group is located. It should comply with + RFC1035. 
+ """ + + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + network_endpoint_groups_detach_endpoints_request_resource = proto.Field( + proto.MESSAGE, + number=515608697, + message='NetworkEndpointGroupsDetachEndpointsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class DisableXpnHostProjectRequest(proto.Message): + r"""A request message for Projects.DisableXpnHost. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class DisableXpnResourceProjectRequest(proto.Message): + r"""A request message for Projects.DisableXpnResource. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. 
+ projects_disable_xpn_resource_request_resource (google.cloud.compute_v1.types.ProjectsDisableXpnResourceRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + projects_disable_xpn_resource_request_resource = proto.Field( + proto.MESSAGE, + number=209136170, + message='ProjectsDisableXpnResourceRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class Disk(proto.Message): + r"""Represents a Persistent Disk resource. Google Compute Engine has two + Disk resources: \* `Zonal `__ + \* `Regional `__ + Persistent disks are required for running your VM instances. Create + both boot and non-boot (data) persistent disks. For more + information, read Persistent Disks. For more storage options, read + Storage options. The disks resource represents a zonal persistent + disk. For more information, read Zonal persistent disks. The + regionDisks resource represents a regional persistent disk. For more + information, read Regional resources. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. 
+ description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + disk_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + Encrypts the disk using a customer-supplied encryption key + or a customer-managed encryption key. Encryption keys do not + protect access to metadata of the disk. After you encrypt a + disk with a customer-supplied key, you must provide the same + key if you use the disk later. For example, to create a disk + snapshot, to create a disk image, to create a machine image, + or to attach the disk to a virtual machine. After you + encrypt a disk with a customer-managed key, the + diskEncryptionKey.kmsKeyName is set to a key *version* name + once the disk is created. The disk is encrypted with this + version of the key. In the response, + diskEncryptionKey.kmsKeyName appears in the following + format: "diskEncryptionKey.kmsKeyName": + "projects/kms_project_id/locations/region/keyRings/ + key_region/cryptoKeys/key /cryptoKeysVersions/version If you + do not provide an encryption key when creating the disk, + then the disk is encrypted using an automatically generated + key and you don't need to provide a key to use the disk + later. + + This field is a member of `oneof`_ ``_disk_encryption_key``. + guest_os_features (Sequence[google.cloud.compute_v1.types.GuestOsFeature]): + A list of features to enable on the guest + operating system. Applicable only for bootable + images. Read Enabling guest operating system + features to see a list of available options. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#disk for + disks. + + This field is a member of `oneof`_ ``_kind``. 
+ label_fingerprint (str): + A fingerprint for the labels being applied to + this disk, which is essentially a hash of the + labels set used for optimistic locking. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash in order to update or + change labels, otherwise the request will fail + with error 412 conditionNotMet. To see the + latest fingerprint, make a get() request to + retrieve a disk. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.Disk.LabelsEntry]): + Labels to apply to this disk. These can be + later modified by the setLabels method. + last_attach_timestamp (str): + [Output Only] Last attach timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_last_attach_timestamp``. + last_detach_timestamp (str): + [Output Only] Last detach timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_last_detach_timestamp``. + license_codes (Sequence[int]): + Integer license codes indicating which + licenses are attached to this disk. + licenses (Sequence[str]): + A list of publicly visible licenses. Reserved + for Google's use. + location_hint (str): + An opaque location hint used to place the + disk close to other resources. This field is for + use by internal tools that use the public API. + + This field is a member of `oneof`_ ``_location_hint``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. 
+ + This field is a member of `oneof`_ ``_name``. + options (str): + Internal use only. + + This field is a member of `oneof`_ ``_options``. + physical_block_size_bytes (int): + Physical block size of the persistent disk, + in bytes. If not present in a request, a default + value is used. The currently supported size is + 4096, other sizes may be added in the future. If + an unsupported value is requested, the error + message will list the supported values for the + caller's project. + + This field is a member of `oneof`_ ``_physical_block_size_bytes``. + provisioned_iops (int): + Indicates how many IOPS to provision for the + disk. This sets the number of I/O operations per + second that the disk can handle. Values must be + between 10,000 and 120,000. For more details, + see the Extreme persistent disk documentation. + + This field is a member of `oneof`_ ``_provisioned_iops``. + region (str): + [Output Only] URL of the region where the disk resides. Only + applicable for regional resources. You must specify this + field as part of the HTTP request URL. It is not settable as + a field in the request body. + + This field is a member of `oneof`_ ``_region``. + replica_zones (Sequence[str]): + URLs of the zones where the disk should be + replicated to. Only applicable for regional + resources. + resource_policies (Sequence[str]): + Resource policies applied to this disk for + automatic snapshot creations. + satisfies_pzs (bool): + [Output Only] Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzs``. + self_link (str): + [Output Only] Server-defined fully-qualified URL for this + resource. + + This field is a member of `oneof`_ ``_self_link``. + size_gb (int): + Size, in GB, of the persistent disk. You can + specify this field when creating a persistent + disk using the sourceImage, sourceSnapshot, or + sourceDisk parameter, or specify it alone to + create an empty persistent disk. 
If you specify + this field along with a source, the value of + sizeGb must not be less than the size of the + source. Acceptable values are 1 to 65536, + inclusive. + + This field is a member of `oneof`_ ``_size_gb``. + source_disk (str): + The source disk used to create this disk. You + can provide this as a partial or full URL to the + resource. For example, the following are valid + values: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /disks/disk - + https://www.googleapis.com/compute/v1/projects/project/regions/region + /disks/disk - + projects/project/zones/zone/disks/disk - + projects/project/regions/region/disks/disk - + zones/zone/disks/disk - + regions/region/disks/disk + + This field is a member of `oneof`_ ``_source_disk``. + source_disk_id (str): + [Output Only] The unique ID of the disk used to create this + disk. This value identifies the exact disk that was used to + create this persistent disk. For example, if you created the + persistent disk from a disk that was later deleted and + recreated under the same name, the source disk ID would + identify the exact version of the disk that was used. + + This field is a member of `oneof`_ ``_source_disk_id``. + source_image (str): + The source image used to create this disk. If + the source image is deleted, this field will not + be set. To create a disk with one of the public + operating system images, specify the image by + its family name. For example, specify + family/debian-9 to use the latest Debian 9 + image: projects/debian- + cloud/global/images/family/debian-9 + Alternatively, use a specific version of a + public operating system image: projects/debian- + cloud/global/images/debian-9-stretch-vYYYYMMDD + To create a disk with a custom image that you + created, specify the image name in the following + format: global/images/my-custom-image You can + also specify a custom image by its image family, + which returns the latest version of the image in + that family. 
Replace the image name with + family/family-name: global/images/family/my- + image-family + + This field is a member of `oneof`_ ``_source_image``. + source_image_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source image. Required if the source image is + protected by a customer-supplied encryption key. + + This field is a member of `oneof`_ ``_source_image_encryption_key``. + source_image_id (str): + [Output Only] The ID value of the image used to create this + disk. This value identifies the exact image that was used to + create this persistent disk. For example, if you created the + persistent disk from an image that was later deleted and + recreated under the same name, the source image ID would + identify the exact version of the image that was used. + + This field is a member of `oneof`_ ``_source_image_id``. + source_snapshot (str): + The source snapshot used to create this disk. + You can provide this as a partial or full URL to + the resource. For example, the following are + valid values: - + https://www.googleapis.com/compute/v1/projects/project + /global/snapshots/snapshot - + projects/project/global/snapshots/snapshot - + global/snapshots/snapshot + + This field is a member of `oneof`_ ``_source_snapshot``. + source_snapshot_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source snapshot. Required if the source snapshot + is protected by a customer-supplied encryption + key. + + This field is a member of `oneof`_ ``_source_snapshot_encryption_key``. + source_snapshot_id (str): + [Output Only] The unique ID of the snapshot used to create + this disk. This value identifies the exact snapshot that was + used to create this persistent disk. 
For example, if you + created the persistent disk from a snapshot that was later + deleted and recreated under the same name, the source + snapshot ID would identify the exact version of the snapshot + that was used. + + This field is a member of `oneof`_ ``_source_snapshot_id``. + source_storage_object (str): + The full Google Cloud Storage URI where the + disk image is stored. This file must be a gzip- + compressed tarball whose name ends in .tar.gz or + virtual machine disk whose name ends in vmdk. + Valid URIs may start with gs:// or + https://storage.googleapis.com/. This flag is + not optimized for creating multiple disks from a + source storage object. To create many disks from + a source storage object, use gcloud compute + images import instead. + + This field is a member of `oneof`_ ``_source_storage_object``. + status (str): + [Output Only] The status of disk creation. - CREATING: Disk + is provisioning. - RESTORING: Source data is being copied + into the disk. - FAILED: Disk creation failed. - READY: Disk + is ready for use. - DELETING: Disk is deleting. Check the + Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + type_ (str): + URL of the disk type resource describing + which disk type to use to create the disk. + Provide this when creating the disk. For + example: projects/project + /zones/zone/diskTypes/pd-ssd . See Persistent + disk types. + + This field is a member of `oneof`_ ``_type``. + users (Sequence[str]): + [Output Only] Links to the users of the disk (attached + instances) in form: + projects/project/zones/zone/instances/instance + zone (str): + [Output Only] URL of the zone where the disk resides. You + must specify this field as part of the HTTP request URL. It + is not settable as a field in the request body. + + This field is a member of `oneof`_ ``_zone``. + """ + class Status(proto.Enum): + r"""[Output Only] The status of disk creation. - CREATING: Disk is + provisioning. 
- RESTORING: Source data is being copied into the + disk. - FAILED: Disk creation failed. - READY: Disk is ready for + use. - DELETING: Disk is deleting. + """ + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + FAILED = 455706685 + READY = 77848963 + RESTORING = 404263851 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disk_encryption_key = proto.Field( + proto.MESSAGE, + number=271660677, + optional=True, + message='CustomerEncryptionKey', + ) + guest_os_features = proto.RepeatedField( + proto.MESSAGE, + number=79294545, + message='GuestOsFeature', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + last_attach_timestamp = proto.Field( + proto.STRING, + number=42159653, + optional=True, + ) + last_detach_timestamp = proto.Field( + proto.STRING, + number=56471027, + optional=True, + ) + license_codes = proto.RepeatedField( + proto.INT64, + number=45482664, + ) + licenses = proto.RepeatedField( + proto.STRING, + number=337642578, + ) + location_hint = proto.Field( + proto.STRING, + number=350519505, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + options = proto.Field( + proto.STRING, + number=361137822, + optional=True, + ) + physical_block_size_bytes = proto.Field( + proto.INT64, + number=420007943, + optional=True, + ) + provisioned_iops = proto.Field( + proto.INT64, + number=186769108, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + replica_zones = proto.RepeatedField( + proto.STRING, + number=48438272, + ) + resource_policies = 
proto.RepeatedField( + proto.STRING, + number=22220385, + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + size_gb = proto.Field( + proto.INT64, + number=494929369, + optional=True, + ) + source_disk = proto.Field( + proto.STRING, + number=451753793, + optional=True, + ) + source_disk_id = proto.Field( + proto.STRING, + number=454190809, + optional=True, + ) + source_image = proto.Field( + proto.STRING, + number=50443319, + optional=True, + ) + source_image_encryption_key = proto.Field( + proto.MESSAGE, + number=381503659, + optional=True, + message='CustomerEncryptionKey', + ) + source_image_id = proto.Field( + proto.STRING, + number=55328291, + optional=True, + ) + source_snapshot = proto.Field( + proto.STRING, + number=126061928, + optional=True, + ) + source_snapshot_encryption_key = proto.Field( + proto.MESSAGE, + number=303679322, + optional=True, + message='CustomerEncryptionKey', + ) + source_snapshot_id = proto.Field( + proto.STRING, + number=98962258, + optional=True, + ) + source_storage_object = proto.Field( + proto.STRING, + number=233052711, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + users = proto.RepeatedField( + proto.STRING, + number=111578632, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class DiskAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.DiskAggregatedList.ItemsEntry]): + A list of DisksScopedList resources. + kind (str): + [Output Only] Type of resource. Always + compute#diskAggregatedList for aggregated lists of + persistent disks. 
+ + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='DisksScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class DiskInstantiationConfig(proto.Message): + r"""A specification of the desired way to instantiate a disk in + the instance template when its created from a source instance. + + Attributes: + auto_delete (bool): + Specifies whether the disk will be auto- + eleted when the instance is deleted (but not + when the disk is detached from the instance). + + This field is a member of `oneof`_ ``_auto_delete``. 
+ custom_image (str): + The custom source image to be used to restore + this disk when instantiating this instance + template. + + This field is a member of `oneof`_ ``_custom_image``. + device_name (str): + Specifies the device name of the disk to + which the configurations apply to. + + This field is a member of `oneof`_ ``_device_name``. + instantiate_from (str): + Specifies whether to include the disk and + what image to use. Possible values are: - + source-image: to use the same image that was + used to create the source instance's + corresponding disk. Applicable to the boot disk + and additional read-write disks. - source-image- + family: to use the same image family that was + used to create the source instance's + corresponding disk. Applicable to the boot disk + and additional read-write disks. - custom-image: + to use a user-provided image url for disk + creation. Applicable to the boot disk and + additional read-write disks. - attach-read-only: + to attach a read-only disk. Applicable to read- + only disks. - do-not-include: to exclude a disk + from the template. Applicable to additional + read-write disks, local SSDs, and read-only + disks. Check the InstantiateFrom enum for the + list of possible values. + + This field is a member of `oneof`_ ``_instantiate_from``. + """ + class InstantiateFrom(proto.Enum): + r"""Specifies whether to include the disk and what image to use. + Possible values are: - source-image: to use the same image that + was used to create the source instance's corresponding disk. + Applicable to the boot disk and additional read-write disks. - + source-image-family: to use the same image family that was used + to create the source instance's corresponding disk. Applicable + to the boot disk and additional read-write disks. - custom- + image: to use a user-provided image url for disk creation. + Applicable to the boot disk and additional read-write disks. - + attach-read-only: to attach a read-only disk. 
Applicable to + read-only disks. - do-not-include: to exclude a disk from the + template. Applicable to additional read-write disks, local SSDs, + and read-only disks. + """ + UNDEFINED_INSTANTIATE_FROM = 0 + ATTACH_READ_ONLY = 513775419 + BLANK = 63281460 + CUSTOM_IMAGE = 196311789 + DEFAULT = 115302945 + DO_NOT_INCLUDE = 104218952 + SOURCE_IMAGE = 62631959 + SOURCE_IMAGE_FAMILY = 76850316 + + auto_delete = proto.Field( + proto.BOOL, + number=464761403, + optional=True, + ) + custom_image = proto.Field( + proto.STRING, + number=184123149, + optional=True, + ) + device_name = proto.Field( + proto.STRING, + number=67541716, + optional=True, + ) + instantiate_from = proto.Field( + proto.STRING, + number=393383903, + optional=True, + ) + + +class DiskList(proto.Message): + r"""A list of Disk resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Disk]): + A list of Disk resources. + kind (str): + [Output Only] Type of resource. Always compute#diskList for + lists of disks. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Disk', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class DiskMoveRequest(proto.Message): + r""" + + Attributes: + destination_zone (str): + The URL of the destination zone to move the + disk. This can be a full or partial URL. For + example, the following are all valid URLs to a + zone: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + - projects/project/zones/zone - zones/zone + + This field is a member of `oneof`_ ``_destination_zone``. + target_disk (str): + The URL of the target disk to move. This can + be a full or partial URL. For example, the + following are all valid URLs to a disk: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /disks/disk - + projects/project/zones/zone/disks/disk - + zones/zone/disks/disk + + This field is a member of `oneof`_ ``_target_disk``. + """ + + destination_zone = proto.Field( + proto.STRING, + number=131854653, + optional=True, + ) + target_disk = proto.Field( + proto.STRING, + number=62433163, + optional=True, + ) + + +class DiskType(proto.Message): + r"""Represents a Disk Type resource. Google Compute Engine has two Disk + Type resources: \* + `Regional `__ \* + `Zonal `__ You can choose + from a variety of disk types based on your needs. For more + information, read Storage options. The diskTypes resource represents + disk types for a zonal persistent disk. For more information, read + Zonal persistent disks. 
The regionDiskTypes resource represents disk + types for a regional persistent disk. For more information, read + Regional persistent disks. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + default_disk_size_gb (int): + [Output Only] Server-defined default disk size in GB. + + This field is a member of `oneof`_ ``_default_disk_size_gb``. + deprecated (google.cloud.compute_v1.types.DeprecationStatus): + [Output Only] The deprecation status associated with this + disk type. + + This field is a member of `oneof`_ ``_deprecated``. + description (str): + [Output Only] An optional description of this resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#diskType + for disk types. + + This field is a member of `oneof`_ ``_kind``. + name (str): + [Output Only] Name of the resource. + + This field is a member of `oneof`_ ``_name``. + region (str): + [Output Only] URL of the region where the disk type resides. + Only applicable for regional resources. You must specify + this field as part of the HTTP request URL. It is not + settable as a field in the request body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + valid_disk_size (str): + [Output Only] An optional textual description of the valid + disk size, such as "10GB-10TB". + + This field is a member of `oneof`_ ``_valid_disk_size``. + zone (str): + [Output Only] URL of the zone where the disk type resides. + You must specify this field as part of the HTTP request URL. + It is not settable as a field in the request body. 
+ + This field is a member of `oneof`_ ``_zone``. + """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + default_disk_size_gb = proto.Field( + proto.INT64, + number=270619253, + optional=True, + ) + deprecated = proto.Field( + proto.MESSAGE, + number=515138995, + optional=True, + message='DeprecationStatus', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + valid_disk_size = proto.Field( + proto.STRING, + number=493962464, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class DiskTypeAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.DiskTypeAggregatedList.ItemsEntry]): + A list of DiskTypesScopedList resources. + kind (str): + [Output Only] Type of resource. Always + compute#diskTypeAggregatedList. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. 
+ + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='DiskTypesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class DiskTypeList(proto.Message): + r"""Contains a list of disk types. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.DiskType]): + A list of DiskType resources. + kind (str): + [Output Only] Type of resource. Always compute#diskTypeList + for disk types. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='DiskType', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class DiskTypesScopedList(proto.Message): + r""" + + Attributes: + disk_types (Sequence[google.cloud.compute_v1.types.DiskType]): + [Output Only] A list of disk types contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of disk types when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + disk_types = proto.RepeatedField( + proto.MESSAGE, + number=198926167, + message='DiskType', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class DisksAddResourcePoliciesRequest(proto.Message): + r""" + + Attributes: + resource_policies (Sequence[str]): + Full or relative path to the resource policy + to be added to this disk. You can only specify + one resource policy. + """ + + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + + +class DisksRemoveResourcePoliciesRequest(proto.Message): + r""" + + Attributes: + resource_policies (Sequence[str]): + Resource policies to be removed from this + disk. 
+ """ + + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + + +class DisksResizeRequest(proto.Message): + r""" + + Attributes: + size_gb (int): + The new size of the persistent disk, which is + specified in GB. + + This field is a member of `oneof`_ ``_size_gb``. + """ + + size_gb = proto.Field( + proto.INT64, + number=494929369, + optional=True, + ) + + +class DisksScopedList(proto.Message): + r""" + + Attributes: + disks (Sequence[google.cloud.compute_v1.types.Disk]): + [Output Only] A list of disks contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of disks when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + disks = proto.RepeatedField( + proto.MESSAGE, + number=95594102, + message='Disk', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class DisplayDevice(proto.Message): + r"""A set of Display Device options + + Attributes: + enable_display (bool): + Defines whether the instance has Display + enabled. + + This field is a member of `oneof`_ ``_enable_display``. + """ + + enable_display = proto.Field( + proto.BOOL, + number=14266886, + optional=True, + ) + + +class DistributionPolicy(proto.Message): + r""" + + Attributes: + target_shape (str): + The distribution shape to which the group + converges either proactively or on resize events + (depending on the value set in + updatePolicy.instanceRedistributionType). Check + the TargetShape enum for the list of possible + values. + + This field is a member of `oneof`_ ``_target_shape``. + zones (Sequence[google.cloud.compute_v1.types.DistributionPolicyZoneConfiguration]): + Zones where the regional managed instance + group will create and manage its instances. 
+ """ + class TargetShape(proto.Enum): + r"""The distribution shape to which the group converges either + proactively or on resize events (depending on the value set in + updatePolicy.instanceRedistributionType). + """ + UNDEFINED_TARGET_SHAPE = 0 + ANY = 64972 + BALANCED = 468409608 + EVEN = 2140442 + + target_shape = proto.Field( + proto.STRING, + number=338621299, + optional=True, + ) + zones = proto.RepeatedField( + proto.MESSAGE, + number=116085319, + message='DistributionPolicyZoneConfiguration', + ) + + +class DistributionPolicyZoneConfiguration(proto.Message): + r""" + + Attributes: + zone (str): + The URL of the zone. The zone must exist in + the region where the managed instance group is + located. + + This field is a member of `oneof`_ ``_zone``. + """ + + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class Duration(proto.Message): + r"""A Duration represents a fixed-length span of time represented + as a count of seconds and fractions of seconds at nanosecond + resolution. It is independent of any calendar and concepts like + "day" or "month". Range is approximately 10,000 years. + + Attributes: + nanos (int): + Span of time that's a fraction of a second at nanosecond + resolution. Durations less than one second are represented + with a 0 ``seconds`` field and a positive ``nanos`` field. + Must be from 0 to 999,999,999 inclusive. + + This field is a member of `oneof`_ ``_nanos``. + seconds (int): + Span of time at a resolution of a second. Must be from 0 to + 315,576,000,000 inclusive. Note: these bounds are computed + from: 60 sec/min \* 60 min/hr \* 24 hr/day \* 365.25 + days/year \* 10000 years + + This field is a member of `oneof`_ ``_seconds``. + """ + + nanos = proto.Field( + proto.INT32, + number=104586303, + optional=True, + ) + seconds = proto.Field( + proto.INT64, + number=359484031, + optional=True, + ) + + +class EnableXpnHostProjectRequest(proto.Message): + r"""A request message for Projects.EnableXpnHost. 
See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class EnableXpnResourceProjectRequest(proto.Message): + r"""A request message for Projects.EnableXpnResource. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + projects_enable_xpn_resource_request_resource (google.cloud.compute_v1.types.ProjectsEnableXpnResourceRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + projects_enable_xpn_resource_request_resource = proto.Field( + proto.MESSAGE, + number=421980207, + message='ProjectsEnableXpnResourceRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class Error(proto.Message): + r"""[Output Only] If errors are generated during processing of the + operation, this field will be populated. + + Attributes: + errors (Sequence[google.cloud.compute_v1.types.Errors]): + [Output Only] The array of errors encountered while + processing this operation. + """ + + errors = proto.RepeatedField( + proto.MESSAGE, + number=315977579, + message='Errors', + ) + + +class Errors(proto.Message): + r""" + + Attributes: + code (str): + [Output Only] The error type identifier for this error. + + This field is a member of `oneof`_ ``_code``. + location (str): + [Output Only] Indicates the field in the request that caused + the error. This property is optional. + + This field is a member of `oneof`_ ``_location``. + message (str): + [Output Only] An optional, human-readable error message. + + This field is a member of `oneof`_ ``_message``. + """ + + code = proto.Field( + proto.STRING, + number=3059181, + optional=True, + ) + location = proto.Field( + proto.STRING, + number=290430901, + optional=True, + ) + message = proto.Field( + proto.STRING, + number=418054151, + optional=True, + ) + + +class ExchangedPeeringRoute(proto.Message): + r""" + + Attributes: + dest_range (str): + The destination range of the route. + + This field is a member of `oneof`_ ``_dest_range``. + imported (bool): + True if the peering route has been imported + from a peer. 
The actual import happens if the + field networkPeering.importCustomRoutes is true + for this network, and + networkPeering.exportCustomRoutes is true for + the peer network, and the import does not result + in a route conflict. + + This field is a member of `oneof`_ ``_imported``. + next_hop_region (str): + The region of peering route next hop, only + applies to dynamic routes. + + This field is a member of `oneof`_ ``_next_hop_region``. + priority (int): + The priority of the peering route. + + This field is a member of `oneof`_ ``_priority``. + type_ (str): + The type of the peering route. + Check the Type enum for the list of possible + values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""The type of the peering route.""" + UNDEFINED_TYPE = 0 + DYNAMIC_PEERING_ROUTE = 469794858 + STATIC_PEERING_ROUTE = 473407545 + SUBNET_PEERING_ROUTE = 465782504 + + dest_range = proto.Field( + proto.STRING, + number=381327712, + optional=True, + ) + imported = proto.Field( + proto.BOOL, + number=114502404, + optional=True, + ) + next_hop_region = proto.Field( + proto.STRING, + number=122577014, + optional=True, + ) + priority = proto.Field( + proto.UINT32, + number=445151652, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class ExchangedPeeringRoutesList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.ExchangedPeeringRoute]): + A list of ExchangedPeeringRoute resources. + kind (str): + [Output Only] Type of resource. Always + compute#exchangedPeeringRoutesList for exchanged peering + routes lists. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. 
If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='ExchangedPeeringRoute', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ExpandIpCidrRangeSubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.ExpandIpCidrRange. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + subnetwork (str): + Name of the Subnetwork resource to update. + subnetworks_expand_ip_cidr_range_request_resource (google.cloud.compute_v1.types.SubnetworksExpandIpCidrRangeRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + ) + subnetworks_expand_ip_cidr_range_request_resource = proto.Field( + proto.MESSAGE, + number=477014110, + message='SubnetworksExpandIpCidrRangeRequest', + ) + + +class Expr(proto.Message): + r"""Represents a textual expression in the Common Expression Language + (CEL) syntax. CEL is a C-like expression language. The syntax and + semantics of CEL are documented at + https://github.com/google/cel-spec. Example (Comparison): title: + "Summary size limit" description: "Determines if a summary is less + than 100 chars" expression: "document.summary.size() < 100" Example + (Equality): title: "Requestor is owner" description: "Determines if + requestor is the document owner" expression: "document.owner == + request.auth.claims.email" Example (Logic): title: "Public + documents" description: "Determine whether the document should be + publicly visible" expression: "document.type != 'private' && + document.type != 'internal'" Example (Data Manipulation): title: + "Notification string" description: "Create a notification string + with a timestamp." 
expression: "'New message received at ' + + string(document.create_time)" The exact variables and functions that + may be referenced within an expression are determined by the service + that evaluates it. See the service documentation for additional + information. + + Attributes: + description (str): + Optional. Description of the expression. This + is a longer text which describes the expression, + e.g. when hovered over it in a UI. + + This field is a member of `oneof`_ ``_description``. + expression (str): + Textual representation of an expression in + Common Expression Language syntax. + + This field is a member of `oneof`_ ``_expression``. + location (str): + Optional. String indicating the location of + the expression for error reporting, e.g. a file + name and a position in the file. + + This field is a member of `oneof`_ ``_location``. + title (str): + Optional. Title for the expression, i.e. a + short string describing its purpose. This can be + used e.g. in UIs which allow to enter the + expression. + + This field is a member of `oneof`_ ``_title``. + """ + + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + expression = proto.Field( + proto.STRING, + number=352031384, + optional=True, + ) + location = proto.Field( + proto.STRING, + number=290430901, + optional=True, + ) + title = proto.Field( + proto.STRING, + number=110371416, + optional=True, + ) + + +class ExternalVpnGateway(proto.Message): + r"""Represents an external VPN gateway. External VPN gateway is + the on-premises VPN gateway(s) or another cloud provider's VPN + gateway that connects to your Google Cloud VPN gateway. To + create a highly available VPN from Google Cloud Platform to your + VPN gateway or another cloud provider's VPN gateway, you must + create a external VPN gateway resource with information about + the other gateway. For more information about using external VPN + gateways, see Creating an HA VPN gateway and tunnel pair to a + peer VPN. 
+ + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + interfaces (Sequence[google.cloud.compute_v1.types.ExternalVpnGatewayInterface]): + A list of interfaces for this external VPN + gateway. If your peer-side gateway is an on- + premises gateway and non-AWS cloud providers' + gateway, at most two interfaces can be provided + for an external VPN gateway. If your peer side + is an AWS virtual private gateway, four + interfaces should be provided for an external + VPN gateway. + kind (str): + [Output Only] Type of the resource. Always + compute#externalVpnGateway for externalVpnGateways. + + This field is a member of `oneof`_ ``_kind``. + label_fingerprint (str): + A fingerprint for the labels being applied to + this ExternalVpnGateway, which is essentially a + hash of the labels set used for optimistic + locking. The fingerprint is initially generated + by Compute Engine and changes after every + request to modify or update labels. You must + always provide an up-to-date fingerprint hash in + order to update or change labels, otherwise the + request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve an + ExternalVpnGateway. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.ExternalVpnGateway.LabelsEntry]): + Labels for this resource. These can only be + added or modified by the setLabels method. Each + label key/value pair must comply with RFC1035. + Label values may be empty. 
+ name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + redundancy_type (str): + Indicates the user-supplied redundancy type + of this external VPN gateway. Check the + RedundancyType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_redundancy_type``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + """ + class RedundancyType(proto.Enum): + r"""Indicates the user-supplied redundancy type of this external + VPN gateway. + """ + UNDEFINED_REDUNDANCY_TYPE = 0 + FOUR_IPS_REDUNDANCY = 520087913 + SINGLE_IP_INTERNALLY_REDUNDANT = 133914873 + TWO_IPS_REDUNDANCY = 367049635 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + interfaces = proto.RepeatedField( + proto.MESSAGE, + number=12073562, + message='ExternalVpnGatewayInterface', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + redundancy_type = proto.Field( + proto.STRING, + number=271443740, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + 
number=456214797, + optional=True, + ) + + +class ExternalVpnGatewayInterface(proto.Message): + r"""The interface for the external VPN gateway. + + Attributes: + id (int): + The numeric ID of this interface. The allowed input values + for this id for different redundancy types of external VPN + gateway: - SINGLE_IP_INTERNALLY_REDUNDANT - 0 - + TWO_IPS_REDUNDANCY - 0, 1 - FOUR_IPS_REDUNDANCY - 0, 1, 2, 3 + + This field is a member of `oneof`_ ``_id``. + ip_address (str): + IP address of the interface in the external + VPN gateway. Only IPv4 is supported. This IP + address can be either from your on-premise + gateway or another Cloud provider's VPN gateway, + it cannot be an IP address from Google Compute + Engine. + + This field is a member of `oneof`_ ``_ip_address``. + """ + + id = proto.Field( + proto.UINT32, + number=3355, + optional=True, + ) + ip_address = proto.Field( + proto.STRING, + number=406272220, + optional=True, + ) + + +class ExternalVpnGatewayList(proto.Message): + r"""Response to the list request, and contains a list of + externalVpnGateways. + + Attributes: + etag (str): + + This field is a member of `oneof`_ ``_etag``. + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.ExternalVpnGateway]): + A list of ExternalVpnGateway resources. + kind (str): + [Output Only] Type of resource. Always + compute#externalVpnGatewayList for lists of + externalVpnGateways. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. 
+ + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='ExternalVpnGateway', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class FileContentBuffer(proto.Message): + r""" + + Attributes: + content (str): + The raw content in the secure keys file. + + This field is a member of `oneof`_ ``_content``. + file_type (str): + The file type of source file. + Check the FileType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_file_type``. + """ + class FileType(proto.Enum): + r"""The file type of source file.""" + UNDEFINED_FILE_TYPE = 0 + BIN = 65767 + UNDEFINED = 137851184 + X509 = 2674086 + + content = proto.Field( + proto.STRING, + number=414659705, + optional=True, + ) + file_type = proto.Field( + proto.STRING, + number=294346781, + optional=True, + ) + + +class Firewall(proto.Message): + r"""Represents a Firewall Rule resource. Firewall rules allow or + deny ingress traffic to, and egress traffic from your instances. + For more information, read Firewall rules. 
+ + Attributes: + allowed (Sequence[google.cloud.compute_v1.types.Allowed]): + The list of ALLOW rules specified by this + firewall. Each rule specifies a protocol and + port-range tuple that describes a permitted + connection. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + denied (Sequence[google.cloud.compute_v1.types.Denied]): + The list of DENY rules specified by this + firewall. Each rule specifies a protocol and + port-range tuple that describes a denied + connection. + description (str): + An optional description of this resource. + Provide this field when you create the resource. + + This field is a member of `oneof`_ ``_description``. + destination_ranges (Sequence[str]): + If destination ranges are specified, the + firewall rule applies only to traffic that has + destination IP address in these ranges. These + ranges must be expressed in CIDR format. Both + IPv4 and IPv6 are supported. + direction (str): + Direction of traffic to which this firewall applies, either + ``INGRESS`` or ``EGRESS``. The default is ``INGRESS``. For + ``INGRESS`` traffic, you cannot specify the + destinationRanges field, and for ``EGRESS`` traffic, you + cannot specify the sourceRanges or sourceTags fields. Check + the Direction enum for the list of possible values. + + This field is a member of `oneof`_ ``_direction``. + disabled (bool): + Denotes whether the firewall rule is + disabled. When set to true, the firewall rule is + not enforced and the network behaves as if it + did not exist. If this is unspecified, the + firewall rule will be enabled. + + This field is a member of `oneof`_ ``_disabled``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#firewall + for firewall rules. 
+ + This field is a member of `oneof`_ ``_kind``. + log_config (google.cloud.compute_v1.types.FirewallLogConfig): + This field denotes the logging options for a + particular firewall rule. If logging is enabled, + logs will be exported to Cloud Logging. + + This field is a member of `oneof`_ ``_log_config``. + name (str): + Name of the resource; provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + `a-z <[-a-z0-9]*[a-z0-9]>`__?. The first character must be a + lowercase letter, and all following characters (except for + the last character) must be a dash, lowercase letter, or + digit. The last character must be a lowercase letter or + digit. + + This field is a member of `oneof`_ ``_name``. + network (str): + URL of the network resource for this firewall + rule. If not specified when creating a firewall + rule, the default network is used: + global/networks/default If you choose to specify + this field, you can specify the network as a + full or partial URL. For example, the following + are all valid URLs: - + https://www.googleapis.com/compute/v1/projects/myproject/global/networks/my- + network - projects/myproject/global/networks/my- + network - global/networks/default + + This field is a member of `oneof`_ ``_network``. + priority (int): + Priority for this rule. This is an integer between ``0`` and + ``65535``, both inclusive. The default value is ``1000``. + Relative priorities determine which rule takes effect if + multiple rules apply. Lower values indicate higher priority. + For example, a rule with priority ``0`` has higher + precedence than a rule with priority ``1``. DENY rules take + precedence over ALLOW rules if they have equal priority. + Note that VPC networks have implied rules with a priority of + ``65535``. To avoid conflicts with the implied rules, use a + priority number less than ``65535``. 
+ + This field is a member of `oneof`_ ``_priority``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + source_ranges (Sequence[str]): + If source ranges are specified, the firewall + rule applies only to traffic that has a source + IP address in these ranges. These ranges must be + expressed in CIDR format. One or both of + sourceRanges and sourceTags may be set. If both + fields are set, the rule applies to traffic that + has a source IP address within sourceRanges OR a + source IP from a resource with a matching tag + listed in the sourceTags field. The connection + does not need to match both fields for the rule + to apply. Both IPv4 and IPv6 are supported. + source_service_accounts (Sequence[str]): + If source service accounts are specified, the + firewall rules apply only to traffic originating + from an instance with a service account in this + list. Source service accounts cannot be used to + control traffic to an instance's external IP + address because service accounts are associated + with an instance, not an IP address. + sourceRanges can be set at the same time as + sourceServiceAccounts. If both are set, the + firewall applies to traffic that has a source IP + address within the sourceRanges OR a source IP + that belongs to an instance with service account + listed in sourceServiceAccount. The connection + does not need to match both fields for the + firewall to apply. sourceServiceAccounts cannot + be used at the same time as sourceTags or + targetTags. + source_tags (Sequence[str]): + If source tags are specified, the firewall + rule applies only to traffic with source IPs + that match the primary network interfaces of VM + instances that have the tag and are in the same + VPC network. Source tags cannot be used to + control traffic to an instance's external IP + address, it only applies to traffic between + instances in the same virtual network. 
Because + tags are associated with instances, not IP + addresses. One or both of sourceRanges and + sourceTags may be set. If both fields are set, + the firewall applies to traffic that has a + source IP address within sourceRanges OR a + source IP from a resource with a matching tag + listed in the sourceTags field. The connection + does not need to match both fields for the + firewall to apply. + target_service_accounts (Sequence[str]): + A list of service accounts indicating sets of instances + located in the network that may make network connections as + specified in allowed[]. targetServiceAccounts cannot be used + at the same time as targetTags or sourceTags. If neither + targetServiceAccounts nor targetTags are specified, the + firewall rule applies to all instances on the specified + network. + target_tags (Sequence[str]): + A list of tags that controls which instances + the firewall rule applies to. If targetTags are + specified, then the firewall rule applies only + to instances in the VPC network that have one of + those tags. If no targetTags are specified, the + firewall rule applies to all instances on the + specified network. + """ + class Direction(proto.Enum): + r"""Direction of traffic to which this firewall applies, either + ``INGRESS`` or ``EGRESS``. The default is ``INGRESS``. For + ``INGRESS`` traffic, you cannot specify the destinationRanges field, + and for ``EGRESS`` traffic, you cannot specify the sourceRanges or + sourceTags fields. 
+ """ + UNDEFINED_DIRECTION = 0 + EGRESS = 432880501 + INGRESS = 516931221 + + allowed = proto.RepeatedField( + proto.MESSAGE, + number=162398632, + message='Allowed', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + denied = proto.RepeatedField( + proto.MESSAGE, + number=275217307, + message='Denied', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + destination_ranges = proto.RepeatedField( + proto.STRING, + number=305699879, + ) + direction = proto.Field( + proto.STRING, + number=111150975, + optional=True, + ) + disabled = proto.Field( + proto.BOOL, + number=270940796, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + log_config = proto.Field( + proto.MESSAGE, + number=351299741, + optional=True, + message='FirewallLogConfig', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + source_ranges = proto.RepeatedField( + proto.STRING, + number=200097658, + ) + source_service_accounts = proto.RepeatedField( + proto.STRING, + number=105100756, + ) + source_tags = proto.RepeatedField( + proto.STRING, + number=452222397, + ) + target_service_accounts = proto.RepeatedField( + proto.STRING, + number=457639710, + ) + target_tags = proto.RepeatedField( + proto.STRING, + number=62901767, + ) + + +class FirewallList(proto.Message): + r"""Contains a list of firewalls. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.Firewall]): + A list of Firewall resources. + kind (str): + [Output Only] Type of resource. Always compute#firewallList + for lists of firewalls. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Firewall', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class FirewallLogConfig(proto.Message): + r"""The available logging options for a firewall rule. + + Attributes: + enable (bool): + This field denotes whether to enable logging + for a particular firewall rule. + + This field is a member of `oneof`_ ``_enable``. + metadata (str): + This field can only be specified for a + particular firewall rule if logging is enabled + for that rule. 
This field denotes whether to + include or exclude metadata for firewall logs. + Check the Metadata enum for the list of possible + values. + + This field is a member of `oneof`_ ``_metadata``. + """ + class Metadata(proto.Enum): + r"""This field can only be specified for a particular firewall + rule if logging is enabled for that rule. This field denotes + whether to include or exclude metadata for firewall logs. + """ + UNDEFINED_METADATA = 0 + EXCLUDE_ALL_METADATA = 334519954 + INCLUDE_ALL_METADATA = 164619908 + + enable = proto.Field( + proto.BOOL, + number=311764355, + optional=True, + ) + metadata = proto.Field( + proto.STRING, + number=86866735, + optional=True, + ) + + +class FirewallPoliciesListAssociationsResponse(proto.Message): + r""" + + Attributes: + associations (Sequence[google.cloud.compute_v1.types.FirewallPolicyAssociation]): + A list of associations. + kind (str): + [Output Only] Type of firewallPolicy associations. Always + compute#FirewallPoliciesListAssociations for lists of + firewallPolicy associations. + + This field is a member of `oneof`_ ``_kind``. + """ + + associations = proto.RepeatedField( + proto.MESSAGE, + number=508736530, + message='FirewallPolicyAssociation', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + + +class FirewallPolicy(proto.Message): + r"""Represents a Firewall Policy resource. + + Attributes: + associations (Sequence[google.cloud.compute_v1.types.FirewallPolicyAssociation]): + A list of associations that belong to this + firewall policy. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + display_name (str): + Deprecated, please use short name instead. 
User-provided
+            name of the Organization firewall policy. The name should be
+            unique in the organization in which the firewall policy is
+            created. This name must be set on creation and cannot be
+            changed. The name must be 1-63 characters long, and comply
+            with RFC1035. Specifically, the name must be 1-63 characters
+            long and match the regular expression
+            ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first
+            character must be a lowercase letter, and all following
+            characters must be a dash, lowercase letter, or digit,
+            except the last character, which cannot be a dash.
+
+            This field is a member of `oneof`_ ``_display_name``.
+        fingerprint (str):
+            Specifies a fingerprint for this resource,
+            which is essentially a hash of the metadata's
+            contents and used for optimistic locking.
+            The fingerprint is initially generated by Compute
+            Engine and changes after every request to modify
+            or update metadata. You must always provide an
+            up-to-date fingerprint hash in order to update
+            or change metadata, otherwise the request will
+            fail with error 412 conditionNotMet. To see the
+            latest fingerprint, make get() request to the
+            firewall policy.
+
+            This field is a member of `oneof`_ ``_fingerprint``.
+        id (int):
+            [Output Only] The unique identifier for the resource. This
+            identifier is defined by the server.
+
+            This field is a member of `oneof`_ ``_id``.
+        kind (str):
+            [Output only] Type of the resource. Always
+            compute#firewallPolicy for firewall policies
+
+            This field is a member of `oneof`_ ``_kind``.
+        name (str):
+            [Output Only] Name of the resource. It is a numeric ID
+            allocated by GCP which uniquely identifies the Firewall
+            Policy.
+
+            This field is a member of `oneof`_ ``_name``.
+        parent (str):
+            [Output Only] The parent of the firewall policy.
+
+            This field is a member of `oneof`_ ``_parent``.
+        rule_tuple_count (int):
+            [Output Only] Total count of all firewall policy rule
+            tuples. A firewall policy can not exceed a set number of
+            tuples.
+
+            This field is a member of `oneof`_ ``_rule_tuple_count``.
+        rules (Sequence[google.cloud.compute_v1.types.FirewallPolicyRule]):
+            A list of rules that belong to this policy. There must
+            always be a default rule (rule with priority 2147483647 and
+            match "*"). If no rules are provided when creating a
+            firewall policy, a default rule with action "allow" will be
+            added.
+        self_link (str):
+            [Output Only] Server-defined URL for the resource.
+
+            This field is a member of `oneof`_ ``_self_link``.
+        self_link_with_id (str):
+            [Output Only] Server-defined URL for this resource with the
+            resource id.
+
+            This field is a member of `oneof`_ ``_self_link_with_id``.
+        short_name (str):
+            User-provided name of the Organization firewall policy. The
+            name should be unique in the organization in which the
+            firewall policy is created. This name must be set on
+            creation and cannot be changed. The name must be 1-63
+            characters long, and comply with RFC1035. Specifically, the
+            name must be 1-63 characters long and match the regular
+            expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the
+            first character must be a lowercase letter, and all
+            following characters must be a dash, lowercase letter, or
+            digit, except the last character, which cannot be a dash.
+
+            This field is a member of `oneof`_ ``_short_name``.
+ """ + + associations = proto.RepeatedField( + proto.MESSAGE, + number=508736530, + message='FirewallPolicyAssociation', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + display_name = proto.Field( + proto.STRING, + number=4473832, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + parent = proto.Field( + proto.STRING, + number=78317738, + optional=True, + ) + rule_tuple_count = proto.Field( + proto.INT32, + number=388342037, + optional=True, + ) + rules = proto.RepeatedField( + proto.MESSAGE, + number=108873975, + message='FirewallPolicyRule', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + self_link_with_id = proto.Field( + proto.STRING, + number=44520962, + optional=True, + ) + short_name = proto.Field( + proto.STRING, + number=492051566, + optional=True, + ) + + +class FirewallPolicyAssociation(proto.Message): + r""" + + Attributes: + attachment_target (str): + The target that the firewall policy is + attached to. + + This field is a member of `oneof`_ ``_attachment_target``. + display_name (str): + [Output Only] Deprecated, please use short name instead. The + display name of the firewall policy of the association. + + This field is a member of `oneof`_ ``_display_name``. + firewall_policy_id (str): + [Output Only] The firewall policy ID of the association. + + This field is a member of `oneof`_ ``_firewall_policy_id``. + name (str): + The name for an association. + + This field is a member of `oneof`_ ``_name``. + short_name (str): + [Output Only] The short name of the firewall policy of the + association. 
+ + This field is a member of `oneof`_ ``_short_name``. + """ + + attachment_target = proto.Field( + proto.STRING, + number=175773741, + optional=True, + ) + display_name = proto.Field( + proto.STRING, + number=4473832, + optional=True, + ) + firewall_policy_id = proto.Field( + proto.STRING, + number=357211849, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + short_name = proto.Field( + proto.STRING, + number=492051566, + optional=True, + ) + + +class FirewallPolicyList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.FirewallPolicy]): + A list of FirewallPolicy resources. + kind (str): + [Output Only] Type of resource. Always + compute#firewallPolicyList for listsof FirewallPolicies + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='FirewallPolicy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class FirewallPolicyRule(proto.Message): + r"""Represents a rule that describes one or more match conditions + along with the action to be taken when traffic matches this + condition (allow or deny). + + Attributes: + action (str): + The Action to perform when the client + connection triggers the rule. Can currently be + either "allow" or "deny()" where valid values + for status are 403, 404, and 502. + + This field is a member of `oneof`_ ``_action``. + description (str): + An optional description for this resource. + + This field is a member of `oneof`_ ``_description``. + direction (str): + The direction in which this rule applies. + Check the Direction enum for the list of + possible values. + + This field is a member of `oneof`_ ``_direction``. + disabled (bool): + Denotes whether the firewall policy rule is + disabled. When set to true, the firewall policy + rule is not enforced and traffic behaves as if + it did not exist. If this is unspecified, the + firewall policy rule will be enabled. + + This field is a member of `oneof`_ ``_disabled``. + enable_logging (bool): + Denotes whether to enable logging for a particular rule. If + logging is enabled, logs will be exported to the configured + export destination in Stackdriver. Logs may be exported to + BigQuery or Pub/Sub. Note: you cannot enable logging on + "goto_next" rules. + + This field is a member of `oneof`_ ``_enable_logging``. + kind (str): + [Output only] Type of the resource. 
Always
+            compute#firewallPolicyRule for firewall policy rules
+
+            This field is a member of `oneof`_ ``_kind``.
+        match (google.cloud.compute_v1.types.FirewallPolicyRuleMatcher):
+            A match condition that incoming traffic is
+            evaluated against. If it evaluates to true, the
+            corresponding 'action' is enforced.
+
+            This field is a member of `oneof`_ ``_match``.
+        priority (int):
+            An integer indicating the priority of a rule
+            in the list. The priority must be a positive
+            value between 0 and 2147483647. Rules are
+            evaluated from highest to lowest priority where
+            0 is the highest priority and 2147483647 is the
+            lowest priority.
+
+            This field is a member of `oneof`_ ``_priority``.
+        rule_tuple_count (int):
+            [Output Only] Calculation of the complexity of a single
+            firewall policy rule.
+
+            This field is a member of `oneof`_ ``_rule_tuple_count``.
+        target_resources (Sequence[str]):
+            A list of network resource URLs to which this
+            rule applies. This field allows you to control
+            which network's VMs get this rule. If this field
+            is left blank, all VMs within the organization
+            will receive the rule.
+        target_service_accounts (Sequence[str]):
+            A list of service accounts indicating the
+            sets of instances that are applied with this
+            rule.
+ """ + class Direction(proto.Enum): + r"""The direction in which this rule applies.""" + UNDEFINED_DIRECTION = 0 + EGRESS = 432880501 + INGRESS = 516931221 + + action = proto.Field( + proto.STRING, + number=187661878, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + direction = proto.Field( + proto.STRING, + number=111150975, + optional=True, + ) + disabled = proto.Field( + proto.BOOL, + number=270940796, + optional=True, + ) + enable_logging = proto.Field( + proto.BOOL, + number=295396515, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + match = proto.Field( + proto.MESSAGE, + number=103668165, + optional=True, + message='FirewallPolicyRuleMatcher', + ) + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + rule_tuple_count = proto.Field( + proto.INT32, + number=388342037, + optional=True, + ) + target_resources = proto.RepeatedField( + proto.STRING, + number=528230647, + ) + target_service_accounts = proto.RepeatedField( + proto.STRING, + number=457639710, + ) + + +class FirewallPolicyRuleMatcher(proto.Message): + r"""Represents a match condition that incoming traffic is + evaluated against. Exactly one field must be specified. + + Attributes: + dest_ip_ranges (Sequence[str]): + CIDR IP address range. Maximum number of + destination CIDR IP ranges allowed is 5000. + layer4_configs (Sequence[google.cloud.compute_v1.types.FirewallPolicyRuleMatcherLayer4Config]): + Pairs of IP protocols and ports that the rule + should match. + src_ip_ranges (Sequence[str]): + CIDR IP address range. Maximum number of + source CIDR IP ranges allowed is 5000. 
+ """ + + dest_ip_ranges = proto.RepeatedField( + proto.STRING, + number=337357713, + ) + layer4_configs = proto.RepeatedField( + proto.MESSAGE, + number=373534261, + message='FirewallPolicyRuleMatcherLayer4Config', + ) + src_ip_ranges = proto.RepeatedField( + proto.STRING, + number=432128083, + ) + + +class FirewallPolicyRuleMatcherLayer4Config(proto.Message): + r""" + + Attributes: + ip_protocol (str): + The IP protocol to which this rule applies. + The protocol type is required when creating a + firewall rule. This value can either be one of + the following well known protocol strings (tcp, + udp, icmp, esp, ah, ipip, sctp), or the IP + protocol number. + + This field is a member of `oneof`_ ``_ip_protocol``. + ports (Sequence[str]): + An optional list of ports to which this rule applies. This + field is only applicable for UDP or TCP protocol. Each entry + must be either an integer or a range. If not specified, this + rule applies to connections through any port. Example inputs + include: ["22"], ["80","443"], and ["12345-12349"]. + """ + + ip_protocol = proto.Field( + proto.STRING, + number=475958960, + optional=True, + ) + ports = proto.RepeatedField( + proto.STRING, + number=106854418, + ) + + +class FixedOrPercent(proto.Message): + r"""Encapsulates numeric value that can be either absolute or + relative. + + Attributes: + calculated (int): + [Output Only] Absolute value of VM instances calculated + based on the specific mode. - If the value is fixed, then + the calculated value is equal to the fixed value. - If the + value is a percent, then the calculated value is percent/100 + \* targetSize. For example, the calculated value of a 80% of + a managed instance group with 150 instances would be (80/100 + \* 150) = 120 VM instances. If there is a remainder, the + number is rounded. + + This field is a member of `oneof`_ ``_calculated``. + fixed (int): + Specifies a fixed number of VM instances. + This must be a positive integer. 
+ + This field is a member of `oneof`_ ``_fixed``. + percent (int): + Specifies a percentage of instances between 0 + to 100%, inclusive. For example, specify 80 for + 80%. + + This field is a member of `oneof`_ ``_percent``. + """ + + calculated = proto.Field( + proto.INT32, + number=472082878, + optional=True, + ) + fixed = proto.Field( + proto.INT32, + number=97445748, + optional=True, + ) + percent = proto.Field( + proto.INT32, + number=394814533, + optional=True, + ) + + +class ForwardingRule(proto.Message): + r"""Represents a Forwarding Rule resource. Forwarding rule resources in + Google Cloud can be either regional or global in scope: \* + `Global `__ + \* + `Regional `__ + A forwarding rule and its corresponding IP address represent the + frontend configuration of a Google Cloud Platform load balancer. + Forwarding rules can also reference target instances and Cloud VPN + Classic gateways (targetVpnGateway). For more information, read + Forwarding rule concepts and Using protocol forwarding. + + Attributes: + I_p_address (str): + IP address that this forwarding rule serves. When a client + sends traffic to this IP address, the forwarding rule + directs the traffic to the target that you specify in the + forwarding rule. If you don't specify a reserved IP address, + an ephemeral IP address is assigned. Methods for specifying + an IP address: \* IPv4 dotted decimal, as in ``100.1.2.3`` + \* Full URL, as in + https://www.googleapis.com/compute/v1/projects/project_id/regions/region + /addresses/address-name \* Partial URL or by name, as in: - + projects/project_id/regions/region/addresses/address-name - + regions/region/addresses/address-name - + global/addresses/address-name - address-name The + loadBalancingScheme and the forwarding rule's target + determine the type of IP address that you can use. For + detailed information, see `IP address + specifications `__. 
+ Must be set to ``0.0.0.0`` when the target is + targetGrpcProxy that has validateForProxyless field set to + true. For Private Service Connect forwarding rules that + forward traffic to Google APIs, IP address must be provided. + + This field is a member of `oneof`_ ``_I_p_address``. + I_p_protocol (str): + The IP protocol to which this rule applies. For protocol + forwarding, valid options are TCP, UDP, ESP, AH, SCTP, ICMP + and L3_DEFAULT. The valid IP protocols are different for + different load balancing products as described in `Load + balancing + features `__. + Check the IPProtocol enum for the list of possible values. + + This field is a member of `oneof`_ ``_I_p_protocol``. + all_ports (bool): + This field is used along with the backend_service field for + Internal TCP/UDP Load Balancing or Network Load Balancing, + or with the target field for internal and external + TargetInstance. You can only use one of ports and + port_range, or allPorts. The three are mutually exclusive. + For TCP, UDP and SCTP traffic, packets addressed to any + ports will be forwarded to the target or backendService. + + This field is a member of `oneof`_ ``_all_ports``. + allow_global_access (bool): + This field is used along with the backend_service field for + internal load balancing or with the target field for + internal TargetInstance. If the field is set to TRUE, + clients can access ILB from all regions. Otherwise only + allows access from clients in the same region as the + internal load balancer. + + This field is a member of `oneof`_ ``_allow_global_access``. + backend_service (str): + Identifies the backend service to which the + forwarding rule sends traffic. Required for + Internal TCP/UDP Load Balancing and Network Load + Balancing; must be omitted for all other load + balancer types. + + This field is a member of `oneof`_ ``_backend_service``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. 
+ + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a ForwardingRule. Include + the fingerprint in patch request to ensure that + you do not overwrite changes that were applied + from another concurrent request. To see the + latest fingerprint, make a get() request to + retrieve a ForwardingRule. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + ip_version (str): + The IP Version that will be used by this + forwarding rule. Valid options are IPV4 or IPV6. + This can only be specified for an external + global forwarding rule. Check the IpVersion enum + for the list of possible values. + + This field is a member of `oneof`_ ``_ip_version``. + is_mirroring_collector (bool): + Indicates whether or not this load balancer + can be used as a collector for packet mirroring. + To prevent mirroring loops, instances behind + this load balancer will not have their traffic + mirrored even if a PacketMirroring rule applies + to them. This can only be set to true for load + balancers that have their loadBalancingScheme + set to INTERNAL. + + This field is a member of `oneof`_ ``_is_mirroring_collector``. + kind (str): + [Output Only] Type of the resource. Always + compute#forwardingRule for Forwarding Rule resources. + + This field is a member of `oneof`_ ``_kind``. + label_fingerprint (str): + A fingerprint for the labels being applied to + this resource, which is essentially a hash of + the labels set used for optimistic locking. 
The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash in order to update or + change labels, otherwise the request will fail + with error 412 conditionNotMet. To see the + latest fingerprint, make a get() request to + retrieve a ForwardingRule. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.ForwardingRule.LabelsEntry]): + Labels for this resource. These can only be + added or modified by the setLabels method. Each + label key/value pair must comply with RFC1035. + Label values may be empty. + load_balancing_scheme (str): + Specifies the forwarding rule type. For more + information about forwarding rules, refer to + Forwarding rule concepts. Check the + LoadBalancingScheme enum for the list of + possible values. + + This field is a member of `oneof`_ ``_load_balancing_scheme``. + metadata_filters (Sequence[google.cloud.compute_v1.types.MetadataFilter]): + Opaque filter criteria used by load balancer to restrict + routing configuration to a limited set of xDS compliant + clients. In their xDS requests to load balancer, xDS clients + present node metadata. When there is a match, the relevant + configuration is made available to those proxies. Otherwise, + all the resources (e.g. TargetHttpProxy, UrlMap) referenced + by the ForwardingRule are not visible to those proxies. For + each metadataFilter in this list, if its filterMatchCriteria + is set to MATCH_ANY, at least one of the filterLabels must + match the corresponding label provided in the metadata. If + its filterMatchCriteria is set to MATCH_ALL, then all of its + filterLabels must match with corresponding labels provided + in the metadata. If multiple metadataFilters are specified, + all of them need to be satisfied in order to be considered a + match. 
metadataFilters specified here will be applied
+            before those specified in the UrlMap that this
+            ForwardingRule references. metadataFilters only applies to
+            Loadbalancers that have their loadBalancingScheme set to
+            INTERNAL_SELF_MANAGED.
+        name (str):
+            Name of the resource; provided by the client when the
+            resource is created. The name must be 1-63 characters long,
+            and comply with RFC1035. Specifically, the name must be 1-63
+            characters long and match the regular expression
+            ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first
+            character must be a lowercase letter, and all following
+            characters must be a dash, lowercase letter, or digit,
+            except the last character, which cannot be a dash. For
+            Private Service Connect forwarding rules that forward
+            traffic to Google APIs, the forwarding rule name must be a
+            1-20 characters string with lowercase letters and numbers
+            and must start with a letter.
+
+            This field is a member of `oneof`_ ``_name``.
+        network (str):
+            This field is not used for external load
+            balancing. For Internal TCP/UDP Load Balancing,
+            this field identifies the network that the load
+            balanced IP should belong to for this Forwarding
+            Rule. If this field is not specified, the
+            default network will be used. For Private
+            Service Connect forwarding rules that forward
+            traffic to Google APIs, a network must be
+            provided.
+
+            This field is a member of `oneof`_ ``_network``.
+        network_tier (str):
+            This signifies the networking tier used for
+            configuring this load balancer and can only take
+            the following values: PREMIUM, STANDARD. For
+            regional ForwardingRule, the valid values are
+            PREMIUM and STANDARD. For GlobalForwardingRule,
+            the valid value is PREMIUM. If this field is not
+            specified, it is assumed to be PREMIUM. If
+            IPAddress is specified, this value must be equal
+            to the networkTier of the Address. Check the
+            NetworkTier enum for the list of possible
+            values.
+
+            This field is a member of `oneof`_ ``_network_tier``. 
+ port_range (str): + This field can be used only if: - Load balancing scheme is + one of EXTERNAL, INTERNAL_SELF_MANAGED or INTERNAL_MANAGED - + IPProtocol is one of TCP, UDP, or SCTP. Packets addressed to + ports in the specified range will be forwarded to target or + backend_service. You can only use one of ports, port_range, + or allPorts. The three are mutually exclusive. Forwarding + rules with the same [IPAddress, IPProtocol] pair must have + disjoint ports. Some types of forwarding target have + constraints on the acceptable ports. For more information, + see `Port + specifications `__. + @pattern: \\d+(?:-\d+)? + + This field is a member of `oneof`_ ``_port_range``. + ports (Sequence[str]): + The ports field is only supported when the forwarding rule + references a backend_service directly. Only packets + addressed to the `specified list of + ports <(https://cloud.google.com/load-balancing/docs/forwarding-rule-concepts#port_specifications)>`__ + are forwarded to backends. You can only use one of ports and + port_range, or allPorts. The three are mutually exclusive. + You can specify a list of up to five ports, which can be + non-contiguous. Forwarding rules with the same [IPAddress, + IPProtocol] pair must have disjoint ports. @pattern: + \\d+(?:-\d+)? + psc_connection_id (int): + [Output Only] The PSC connection id of the PSC Forwarding + Rule. + + This field is a member of `oneof`_ ``_psc_connection_id``. + psc_connection_status (str): + Check the PscConnectionStatus enum for the + list of possible values. + + This field is a member of `oneof`_ ``_psc_connection_status``. + region (str): + [Output Only] URL of the region where the regional + forwarding rule resides. This field is not applicable to + global forwarding rules. You must specify this field as part + of the HTTP request URL. It is not settable as a field in + the request body. + + This field is a member of `oneof`_ ``_region``. 
+ self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + service_directory_registrations (Sequence[google.cloud.compute_v1.types.ForwardingRuleServiceDirectoryRegistration]): + Service Directory resources to register this + forwarding rule with. Currently, only supports a + single Service Directory resource. It is only + supported for internal load balancing. + service_label (str): + An optional prefix to the service name for this Forwarding + Rule. If specified, the prefix is the first label of the + fully qualified service name. The label must be 1-63 + characters long, and comply with RFC1035. Specifically, the + label must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + This field is only used for internal load balancing. + + This field is a member of `oneof`_ ``_service_label``. + service_name (str): + [Output Only] The internal fully qualified service name for + this Forwarding Rule. This field is only used for internal + load balancing. + + This field is a member of `oneof`_ ``_service_name``. + subnetwork (str): + This field identifies the subnetwork that the + load balanced IP should belong to for this + Forwarding Rule, used in internal load balancing + and network load balancing with IPv6. If the + network specified is in auto subnet mode, this + field is optional. However, a subnetwork must be + specified if the network is in custom subnet + mode or when creating external forwarding rule + with IPv6. + + This field is a member of `oneof`_ ``_subnetwork``. + target (str): + + This field is a member of `oneof`_ ``_target``. + """ + class IPProtocol(proto.Enum): + r"""The IP protocol to which this rule applies. 
For protocol forwarding, + valid options are TCP, UDP, ESP, AH, SCTP, ICMP and L3_DEFAULT. The + valid IP protocols are different for different load balancing + products as described in `Load balancing + features `__. + """ + UNDEFINED_I_P_PROTOCOL = 0 + AH = 2087 + ESP = 68962 + ICMP = 2241597 + SCTP = 2539724 + TCP = 82881 + UDP = 83873 + + class IpVersion(proto.Enum): + r"""The IP Version that will be used by this forwarding rule. + Valid options are IPV4 or IPV6. This can only be specified for + an external global forwarding rule. + """ + UNDEFINED_IP_VERSION = 0 + IPV4 = 2254341 + IPV6 = 2254343 + UNSPECIFIED_VERSION = 21850000 + + class LoadBalancingScheme(proto.Enum): + r"""Specifies the forwarding rule type. For more information + about forwarding rules, refer to Forwarding rule concepts. + """ + UNDEFINED_LOAD_BALANCING_SCHEME = 0 + EXTERNAL = 35607499 + INTERNAL = 279295677 + INTERNAL_MANAGED = 37350397 + INTERNAL_SELF_MANAGED = 236211150 + INVALID = 530283991 + + class NetworkTier(proto.Enum): + r"""This signifies the networking tier used for configuring this + load balancer and can only take the following values: PREMIUM, + STANDARD. For regional ForwardingRule, the valid values are + PREMIUM and STANDARD. For GlobalForwardingRule, the valid value + is PREMIUM. If this field is not specified, it is assumed to be + PREMIUM. If IPAddress is specified, this value must be equal to + the networkTier of the Address. 
+ """ + UNDEFINED_NETWORK_TIER = 0 + PREMIUM = 399530551 + STANDARD = 484642493 + + class PscConnectionStatus(proto.Enum): + r"""""" + UNDEFINED_PSC_CONNECTION_STATUS = 0 + ACCEPTED = 246714279 + CLOSED = 380163436 + PENDING = 35394935 + REJECTED = 174130302 + STATUS_UNSPECIFIED = 42133066 + + I_p_address = proto.Field( + proto.STRING, + number=42976943, + optional=True, + ) + I_p_protocol = proto.Field( + proto.STRING, + number=488094525, + optional=True, + ) + all_ports = proto.Field( + proto.BOOL, + number=445175796, + optional=True, + ) + allow_global_access = proto.Field( + proto.BOOL, + number=499409674, + optional=True, + ) + backend_service = proto.Field( + proto.STRING, + number=306946058, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + ip_version = proto.Field( + proto.STRING, + number=294959552, + optional=True, + ) + is_mirroring_collector = proto.Field( + proto.BOOL, + number=119255164, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + load_balancing_scheme = proto.Field( + proto.STRING, + number=363890244, + optional=True, + ) + metadata_filters = proto.RepeatedField( + proto.MESSAGE, + number=464725739, + message='MetadataFilter', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + network_tier = proto.Field( + proto.STRING, + number=517397843, + optional=True, + ) + port_range = proto.Field( + proto.STRING, + 
number=217518079, + optional=True, + ) + ports = proto.RepeatedField( + proto.STRING, + number=106854418, + ) + psc_connection_id = proto.Field( + proto.UINT64, + number=292082397, + optional=True, + ) + psc_connection_status = proto.Field( + proto.STRING, + number=184149172, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + service_directory_registrations = proto.RepeatedField( + proto.MESSAGE, + number=223549694, + message='ForwardingRuleServiceDirectoryRegistration', + ) + service_label = proto.Field( + proto.STRING, + number=417008874, + optional=True, + ) + service_name = proto.Field( + proto.STRING, + number=359880149, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + optional=True, + ) + target = proto.Field( + proto.STRING, + number=192835985, + optional=True, + ) + + +class ForwardingRuleAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.ForwardingRuleAggregatedList.ItemsEntry]): + A list of ForwardingRulesScopedList + resources. + kind (str): + [Output Only] Type of resource. Always + compute#forwardingRuleAggregatedList for lists of forwarding + rules. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. 
+ + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='ForwardingRulesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ForwardingRuleList(proto.Message): + r"""Contains a list of ForwardingRule resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.ForwardingRule]): + A list of ForwardingRule resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='ForwardingRule', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ForwardingRuleReference(proto.Message): + r""" + + Attributes: + forwarding_rule (str): + + This field is a member of `oneof`_ ``_forwarding_rule``. + """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + optional=True, + ) + + +class ForwardingRuleServiceDirectoryRegistration(proto.Message): + r"""Describes the auto-registration of the Forwarding Rule to + Service Directory. The region and project of the Service + Directory resource generated from this registration will be the + same as this Forwarding Rule. + + Attributes: + namespace (str): + Service Directory namespace to register the + forwarding rule under. + + This field is a member of `oneof`_ ``_namespace``. + service (str): + Service Directory service to register the + forwarding rule under. + + This field is a member of `oneof`_ ``_service``. + service_directory_region (str): + [Optional] Service Directory region to register this global + forwarding rule under. Default to "us-central1". Only used + for PSC for Google APIs. All PSC for Google APIs Forwarding + Rules on the same network should use the same Service + Directory region. + + This field is a member of `oneof`_ ``_service_directory_region``. 
+ """ + + namespace = proto.Field( + proto.STRING, + number=178476379, + optional=True, + ) + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + service_directory_region = proto.Field( + proto.STRING, + number=74030416, + optional=True, + ) + + +class ForwardingRulesScopedList(proto.Message): + r""" + + Attributes: + forwarding_rules (Sequence[google.cloud.compute_v1.types.ForwardingRule]): + A list of forwarding rules contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of forwarding rules when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + forwarding_rules = proto.RepeatedField( + proto.MESSAGE, + number=315821365, + message='ForwardingRule', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class GRPCHealthCheck(proto.Message): + r""" + + Attributes: + grpc_service_name (str): + The gRPC service name for the health check. This field is + optional. The value of grpc_service_name has the following + meanings by convention: - Empty service_name means the + overall status of all services at the backend. - Non-empty + service_name means the health of that gRPC service, as + defined by the owner of the service. The grpc_service_name + can only be ASCII. + + This field is a member of `oneof`_ ``_grpc_service_name``. + port (int): + The port number for the health check request. Must be + specified if port_name and port_specification are not set or + if port_specification is USE_FIXED_PORT. Valid values are 1 + through 65535. + + This field is a member of `oneof`_ ``_port``. + port_name (str): + Port name as defined in InstanceGroup#NamedPort#name. If + both port and port_name are defined, port takes precedence. + The port_name should conform to RFC1035. + + This field is a member of `oneof`_ ``_port_name``. 
+ port_specification (str): + Specifies how port is selected for health checking, can be + one of following values: USE_FIXED_PORT: The port number in + port is used for health checking. USE_NAMED_PORT: The + portName is used for health checking. USE_SERVING_PORT: For + NetworkEndpointGroup, the port specified for each network + endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is + used for health checking. If not specified, gRPC health + check follows behavior specified in port and portName + fields. Check the PortSpecification enum for the list of + possible values. + + This field is a member of `oneof`_ ``_port_specification``. + """ + class PortSpecification(proto.Enum): + r"""Specifies how port is selected for health checking, can be one of + following values: USE_FIXED_PORT: The port number in port is used + for health checking. USE_NAMED_PORT: The portName is used for health + checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port + specified for each network endpoint is used for health checking. For + other backends, the port or named port specified in the Backend + Service is used for health checking. If not specified, gRPC health + check follows behavior specified in port and portName fields. + """ + UNDEFINED_PORT_SPECIFICATION = 0 + USE_FIXED_PORT = 190235748 + USE_NAMED_PORT = 349300671 + USE_SERVING_PORT = 362637516 + + grpc_service_name = proto.Field( + proto.STRING, + number=136533078, + optional=True, + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + port_specification = proto.Field( + proto.STRING, + number=51590597, + optional=True, + ) + + +class GetAcceleratorTypeRequest(proto.Message): + r"""A request message for AcceleratorTypes.Get. See the method + description for details. 
+ + Attributes: + accelerator_type (str): + Name of the accelerator type to return. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + accelerator_type = proto.Field( + proto.STRING, + number=138031246, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetAddressRequest(proto.Message): + r"""A request message for Addresses.Get. See the method + description for details. + + Attributes: + address (str): + Name of the address resource to return. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + """ + + address = proto.Field( + proto.STRING, + number=462920692, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetAssociationFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.GetAssociation. See + the method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to which the + queried rule belongs. + name (str): + The name of the association to get from the + firewall policy. + + This field is a member of `oneof`_ ``_name``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + + +class GetAutoscalerRequest(proto.Message): + r"""A request message for Autoscalers.Get. See the method + description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to return. + project (str): + Project ID for this request. + zone (str): + Name of the zone for this request. 
+ """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetBackendBucketRequest(proto.Message): + r"""A request message for BackendBuckets.Get. See the method + description for details. + + Attributes: + backend_bucket (str): + Name of the BackendBucket resource to return. + project (str): + Project ID for this request. + """ + + backend_bucket = proto.Field( + proto.STRING, + number=91714037, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.Get. See the method + description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to + return. + project (str): + Project ID for this request. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetDiagnosticsInterconnectRequest(proto.Message): + r"""A request message for Interconnects.GetDiagnostics. See the + method description for details. + + Attributes: + interconnect (str): + Name of the interconnect resource to query. + project (str): + Project ID for this request. + """ + + interconnect = proto.Field( + proto.STRING, + number=224601230, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetDiskRequest(proto.Message): + r"""A request message for Disks.Get. See the method description + for details. + + Attributes: + disk (str): + Name of the persistent disk to return. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetDiskTypeRequest(proto.Message): + r"""A request message for DiskTypes.Get. See the method + description for details. + + Attributes: + disk_type (str): + Name of the disk type to return. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + disk_type = proto.Field( + proto.STRING, + number=93009052, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetEffectiveFirewallsInstanceRequest(proto.Message): + r"""A request message for Instances.GetEffectiveFirewalls. See + the method description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + network_interface (str): + The name of the network interface to get the + effective firewalls. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + network_interface = proto.Field( + proto.STRING, + number=365387880, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetEffectiveFirewallsNetworkRequest(proto.Message): + r"""A request message for Networks.GetEffectiveFirewalls. See the + method description for details. + + Attributes: + network (str): + Name of the network for this request. + project (str): + Project ID for this request. + """ + + network = proto.Field( + proto.STRING, + number=232872494, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetExternalVpnGatewayRequest(proto.Message): + r"""A request message for ExternalVpnGateways.Get. See the method + description for details. 
+ + Attributes: + external_vpn_gateway (str): + Name of the externalVpnGateway to return. + project (str): + Project ID for this request. + """ + + external_vpn_gateway = proto.Field( + proto.STRING, + number=109898629, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.Get. See the method + description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to get. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + + +class GetFirewallRequest(proto.Message): + r"""A request message for Firewalls.Get. See the method + description for details. + + Attributes: + firewall (str): + Name of the firewall rule to return. + project (str): + Project ID for this request. + """ + + firewall = proto.Field( + proto.STRING, + number=511016192, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetForwardingRuleRequest(proto.Message): + r"""A request message for ForwardingRules.Get. See the method + description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource to + return. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetFromFamilyImageRequest(proto.Message): + r"""A request message for Images.GetFromFamily. See the method + description for details. + + Attributes: + family (str): + Name of the image family to search for. + project (str): + Project ID for this request. 
+ """ + + family = proto.Field( + proto.STRING, + number=328751972, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetGlobalAddressRequest(proto.Message): + r"""A request message for GlobalAddresses.Get. See the method + description for details. + + Attributes: + address (str): + Name of the address resource to return. + project (str): + Project ID for this request. + """ + + address = proto.Field( + proto.STRING, + number=462920692, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetGlobalForwardingRuleRequest(proto.Message): + r"""A request message for GlobalForwardingRules.Get. See the + method description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource to + return. + project (str): + Project ID for this request. + """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetGlobalNetworkEndpointGroupRequest(proto.Message): + r"""A request message for GlobalNetworkEndpointGroups.Get. See + the method description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group. It + should comply with RFC1035. + project (str): + Project ID for this request. + """ + + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetGlobalOperationRequest(proto.Message): + r"""A request message for GlobalOperations.Get. See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to return. + project (str): + Project ID for this request. 
+ """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetGlobalOrganizationOperationRequest(proto.Message): + r"""A request message for GlobalOrganizationOperations.Get. See + the method description for details. + + Attributes: + operation (str): + Name of the Operations resource to return. + parent_id (str): + Parent ID for this request. + + This field is a member of `oneof`_ ``_parent_id``. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + parent_id = proto.Field( + proto.STRING, + number=459714768, + optional=True, + ) + + +class GetGlobalPublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for GlobalPublicDelegatedPrefixes.Get. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix = proto.Field( + proto.STRING, + number=204238440, + ) + + +class GetGuestAttributesInstanceRequest(proto.Message): + r"""A request message for Instances.GetGuestAttributes. See the + method description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + project (str): + Project ID for this request. + query_path (str): + Specifies the guest attributes path to be + queried. + + This field is a member of `oneof`_ ``_query_path``. + variable_key (str): + Specifies the key for the guest attributes + entry. + + This field is a member of `oneof`_ ``_variable_key``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + query_path = proto.Field( + proto.STRING, + number=368591164, + optional=True, + ) + variable_key = proto.Field( + proto.STRING, + number=164364828, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetHealthBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.GetHealth. See the + method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to which + the queried instance belongs. + project (str): + + resource_group_reference_resource (google.cloud.compute_v1.types.ResourceGroupReference): + The body resource for this request + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource_group_reference_resource = proto.Field( + proto.MESSAGE, + number=112951123, + message='ResourceGroupReference', + ) + + +class GetHealthCheckRequest(proto.Message): + r"""A request message for HealthChecks.Get. See the method + description for details. + + Attributes: + health_check (str): + Name of the HealthCheck resource to return. + project (str): + Project ID for this request. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetHealthRegionBackendServiceRequest(proto.Message): + r"""A request message for RegionBackendServices.GetHealth. See + the method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource for which + to get health. + project (str): + + region (str): + Name of the region scoping this request. 
+ resource_group_reference_resource (google.cloud.compute_v1.types.ResourceGroupReference): + The body resource for this request + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource_group_reference_resource = proto.Field( + proto.MESSAGE, + number=112951123, + message='ResourceGroupReference', + ) + + +class GetHealthTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.GetHealth. See the method + description for details. + + Attributes: + instance_reference_resource (google.cloud.compute_v1.types.InstanceReference): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + target_pool (str): + Name of the TargetPool resource to which the + queried instance belongs. + """ + + instance_reference_resource = proto.Field( + proto.MESSAGE, + number=292926060, + message='InstanceReference', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + + +class GetIamPolicyDiskRequest(proto.Message): + r"""A request message for Disks.GetIamPolicy. See the method + description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. 
+ """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetIamPolicyFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.GetIamPolicy. See the + method description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyImageRequest(proto.Message): + r"""A request message for Images.GetIamPolicy. See the method + description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyInstanceRequest(proto.Message): + r"""A request message for Instances.GetIamPolicy. See the method + description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. 
+ zone (str): + The name of the zone for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetIamPolicyInstanceTemplateRequest(proto.Message): + r"""A request message for InstanceTemplates.GetIamPolicy. See the + method description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyLicenseRequest(proto.Message): + r"""A request message for Licenses.GetIamPolicy. See the method + description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.GetIamPolicy. See the method + description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. 
+ + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetIamPolicyNodeTemplateRequest(proto.Message): + r"""A request message for NodeTemplates.GetIamPolicy. See the + method description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.GetIamPolicy. See the + method description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. 
+ """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyReservationRequest(proto.Message): + r"""A request message for Reservations.GetIamPolicy. See the + method description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetIamPolicyResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.GetIamPolicy. See the + method description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. 
+ """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicyServiceAttachmentRequest(proto.Message): + r"""A request message for ServiceAttachments.GetIamPolicy. See + the method description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicySnapshotRequest(proto.Message): + r"""A request message for Snapshots.GetIamPolicy. See the method + description for details. + + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetIamPolicySubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.GetIamPolicy. See the + method description for details. 
+ + Attributes: + options_requested_policy_version (int): + Requested IAM Policy version. + + This field is a member of `oneof`_ ``_options_requested_policy_version``. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. + """ + + options_requested_policy_version = proto.Field( + proto.INT32, + number=499220029, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class GetImageFamilyViewRequest(proto.Message): + r"""A request message for ImageFamilyViews.Get. See the method + description for details. + + Attributes: + family (str): + Name of the image family to search for. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + family = proto.Field( + proto.STRING, + number=328751972, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetImageRequest(proto.Message): + r"""A request message for Images.Get. See the method description + for details. + + Attributes: + image (str): + Name of the image resource to return. + project (str): + Project ID for this request. + """ + + image = proto.Field( + proto.STRING, + number=100313435, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.Get. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. + project (str): + Project ID for this request. + zone (str): + The name of the zone where the managed + instance group is located. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetInstanceGroupRequest(proto.Message): + r"""A request message for InstanceGroups.Get. See the method + description for details. + + Attributes: + instance_group (str): + The name of the instance group. + project (str): + Project ID for this request. + zone (str): + The name of the zone where the instance group + is located. + """ + + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetInstanceRequest(proto.Message): + r"""A request message for Instances.Get. See the method + description for details. + + Attributes: + instance (str): + Name of the instance resource to return. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetInstanceTemplateRequest(proto.Message): + r"""A request message for InstanceTemplates.Get. See the method + description for details. + + Attributes: + instance_template (str): + The name of the instance template. + project (str): + Project ID for this request. + """ + + instance_template = proto.Field( + proto.STRING, + number=309248228, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetInterconnectAttachmentRequest(proto.Message): + r"""A request message for InterconnectAttachments.Get. See the + method description for details. + + Attributes: + interconnect_attachment (str): + Name of the interconnect attachment to + return. + project (str): + Project ID for this request. 
+ region (str): + Name of the region for this request. + """ + + interconnect_attachment = proto.Field( + proto.STRING, + number=308135284, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetInterconnectLocationRequest(proto.Message): + r"""A request message for InterconnectLocations.Get. See the + method description for details. + + Attributes: + interconnect_location (str): + Name of the interconnect location to return. + project (str): + Project ID for this request. + """ + + interconnect_location = proto.Field( + proto.STRING, + number=492235846, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetInterconnectRequest(proto.Message): + r"""A request message for Interconnects.Get. See the method + description for details. + + Attributes: + interconnect (str): + Name of the interconnect to return. + project (str): + Project ID for this request. + """ + + interconnect = proto.Field( + proto.STRING, + number=224601230, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetLicenseCodeRequest(proto.Message): + r"""A request message for LicenseCodes.Get. See the method + description for details. + + Attributes: + license_code (str): + Number corresponding to the License code + resource to return. + project (str): + Project ID for this request. + """ + + license_code = proto.Field( + proto.STRING, + number=1467179, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetLicenseRequest(proto.Message): + r"""A request message for Licenses.Get. See the method + description for details. + + Attributes: + license_ (str): + Name of the License resource to return. + project (str): + Project ID for this request. 
+ """ + + license_ = proto.Field( + proto.STRING, + number=166757441, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetMachineTypeRequest(proto.Message): + r"""A request message for MachineTypes.Get. See the method + description for details. + + Attributes: + machine_type (str): + Name of the machine type to return. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + machine_type = proto.Field( + proto.STRING, + number=227711026, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetNatMappingInfoRoutersRequest(proto.Message): + r"""A request message for Routers.GetNatMappingInfo. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + router (str): + Name of the Router resource to query for Nat + Mapping information of VM endpoints. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + router = proto.Field( + proto.STRING, + number=148608841, + ) + + +class GetNetworkEndpointGroupRequest(proto.Message): + r"""A request message for NetworkEndpointGroups.Get. See the + method description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group. It + should comply with RFC1035. + project (str): + Project ID for this request. + zone (str): + The name of the zone where the network + endpoint group is located. It should comply with + RFC1035. + """ + + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetNetworkRequest(proto.Message): + r"""A request message for Networks.Get. See the method + description for details. + + Attributes: + network (str): + Name of the network to return. + project (str): + Project ID for this request. + """ + + network = proto.Field( + proto.STRING, + number=232872494, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.Get. See the method + description for details. + + Attributes: + node_group (str): + Name of the node group to return. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. 
+ """ + + node_group = proto.Field( + proto.STRING, + number=469958146, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetNodeTemplateRequest(proto.Message): + r"""A request message for NodeTemplates.Get. See the method + description for details. + + Attributes: + node_template (str): + Name of the node template to return. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + """ + + node_template = proto.Field( + proto.STRING, + number=323154455, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetNodeTypeRequest(proto.Message): + r"""A request message for NodeTypes.Get. See the method + description for details. + + Attributes: + node_type (str): + Name of the node type to return. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + node_type = proto.Field( + proto.STRING, + number=465832791, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetPacketMirroringRequest(proto.Message): + r"""A request message for PacketMirrorings.Get. See the method + description for details. + + Attributes: + packet_mirroring (str): + Name of the PacketMirroring resource to + return. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + """ + + packet_mirroring = proto.Field( + proto.STRING, + number=22305996, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetProjectRequest(proto.Message): + r"""A request message for Projects.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetPublicAdvertisedPrefixeRequest(proto.Message): + r"""A request message for PublicAdvertisedPrefixes.Get. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + public_advertised_prefix (str): + Name of the PublicAdvertisedPrefix resource + to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_advertised_prefix = proto.Field( + proto.STRING, + number=101874590, + ) + + +class GetPublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for PublicDelegatedPrefixes.Get. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix resource to + return. + region (str): + Name of the region of this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix = proto.Field( + proto.STRING, + number=204238440, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionAutoscalerRequest(proto.Message): + r"""A request message for RegionAutoscalers.Get. See the method + description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to return. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionBackendServiceRequest(proto.Message): + r"""A request message for RegionBackendServices.Get. See the + method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to + return. + project (str): + Project ID for this request. 
+ region (str): + Name of the region scoping this request. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionCommitmentRequest(proto.Message): + r"""A request message for RegionCommitments.Get. See the method + description for details. + + Attributes: + commitment (str): + Name of the commitment to return. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + """ + + commitment = proto.Field( + proto.STRING, + number=482134805, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.Get. See the method + description for details. + + Attributes: + disk (str): + Name of the regional persistent disk to + return. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionDiskTypeRequest(proto.Message): + r"""A request message for RegionDiskTypes.Get. See the method + description for details. + + Attributes: + disk_type (str): + Name of the disk type to return. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + """ + + disk_type = proto.Field( + proto.STRING, + number=93009052, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionHealthCheckRequest(proto.Message): + r"""A request message for RegionHealthChecks.Get. See the method + description for details. 
+ + Attributes: + health_check (str): + Name of the HealthCheck resource to return. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionHealthCheckServiceRequest(proto.Message): + r"""A request message for RegionHealthCheckServices.Get. See the + method description for details. + + Attributes: + health_check_service (str): + Name of the HealthCheckService to update. The + name must be 1-63 characters long, and comply + with RFC1035. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + """ + + health_check_service = proto.Field( + proto.STRING, + number=408374747, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for RegionInstanceGroupManagers.Get. See + the method description for details. + + Attributes: + instance_group_manager (str): + Name of the managed instance group to return. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionInstanceGroupRequest(proto.Message): + r"""A request message for RegionInstanceGroups.Get. See the + method description for details. + + Attributes: + instance_group (str): + Name of the instance group resource to + return. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. 
+ """ + + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionNetworkEndpointGroupRequest(proto.Message): + r"""A request message for RegionNetworkEndpointGroups.Get. See + the method description for details. + + Attributes: + network_endpoint_group (str): + The name of the network endpoint group. It + should comply with RFC1035. + project (str): + Project ID for this request. + region (str): + The name of the region where the network + endpoint group is located. It should comply with + RFC1035. + """ + + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionNotificationEndpointRequest(proto.Message): + r"""A request message for RegionNotificationEndpoints.Get. See + the method description for details. + + Attributes: + notification_endpoint (str): + Name of the NotificationEndpoint resource to + return. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + """ + + notification_endpoint = proto.Field( + proto.STRING, + number=376807017, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionOperationRequest(proto.Message): + r"""A request message for RegionOperations.Get. See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to return. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. 
+ """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionRequest(proto.Message): + r"""A request message for Regions.Get. See the method description + for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class GetRegionSslCertificateRequest(proto.Message): + r"""A request message for RegionSslCertificates.Get. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + ssl_certificate (str): + Name of the SslCertificate resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + ssl_certificate = proto.Field( + proto.STRING, + number=46443492, + ) + + +class GetRegionTargetHttpProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpProxies.Get. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + target_http_proxy (str): + Name of the TargetHttpProxy resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + target_http_proxy = proto.Field( + proto.STRING, + number=206872421, + ) + + +class GetRegionTargetHttpsProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpsProxies.Get. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. 
+ region (str): + Name of the region scoping this request. + target_https_proxy (str): + Name of the TargetHttpsProxy resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class GetRegionUrlMapRequest(proto.Message): + r"""A request message for RegionUrlMaps.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + url_map (str): + Name of the UrlMap resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + + +class GetReservationRequest(proto.Message): + r"""A request message for Reservations.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + reservation (str): + Name of the reservation to retrieve. + zone (str): + Name of the zone for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + reservation = proto.Field( + proto.STRING, + number=47530956, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + resource_policy (str): + Name of the resource policy to retrieve. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource_policy = proto.Field( + proto.STRING, + number=159240835, + ) + + +class GetRouteRequest(proto.Message): + r"""A request message for Routes.Get. See the method description + for details. + + Attributes: + project (str): + Project ID for this request. + route (str): + Name of the Route resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + route = proto.Field( + proto.STRING, + number=108704329, + ) + + +class GetRouterRequest(proto.Message): + r"""A request message for Routers.Get. See the method description + for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + router (str): + Name of the Router resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + router = proto.Field( + proto.STRING, + number=148608841, + ) + + +class GetRouterStatusRouterRequest(proto.Message): + r"""A request message for Routers.GetRouterStatus. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + router (str): + Name of the Router resource to query. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + router = proto.Field( + proto.STRING, + number=148608841, + ) + + +class GetRuleFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.GetRule. See the + method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to which the + queried rule belongs. + priority (int): + The priority of the rule to get from the + firewall policy. 
+ + This field is a member of `oneof`_ ``_priority``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + + +class GetRuleSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.GetRule. See the + method description for details. + + Attributes: + priority (int): + The priority of the rule to get from the + security policy. + + This field is a member of `oneof`_ ``_priority``. + project (str): + Project ID for this request. + security_policy (str): + Name of the security policy to which the + queried rule belongs. + """ + + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + security_policy = proto.Field( + proto.STRING, + number=171082513, + ) + + +class GetScreenshotInstanceRequest(proto.Message): + r"""A request message for Instances.GetScreenshot. See the method + description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + security_policy (str): + Name of the security policy to get. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + security_policy = proto.Field( + proto.STRING, + number=171082513, + ) + + +class GetSerialPortOutputInstanceRequest(proto.Message): + r"""A request message for Instances.GetSerialPortOutput. See the + method description for details. 
+ + Attributes: + instance (str): + Name of the instance for this request. + port (int): + Specifies which COM or serial port to + retrieve data from. + + This field is a member of `oneof`_ ``_port``. + project (str): + Project ID for this request. + start (int): + Specifies the starting byte position of the output to + return. To start with the first byte of output to the + specified port, omit this field or set it to ``0``. If the + output for that byte position is available, this field + matches the ``start`` parameter sent with the request. If + the amount of serial console output exceeds the size of the + buffer (1 MB), the oldest output is discarded and is no + longer available. If the requested start position refers to + discarded output, the start position is adjusted to the + oldest output still available, and the adjusted start + position is returned as the ``start`` property value. You + can also provide a negative start position, which translates + to the most recent number of bytes written to the serial + port. For example, -3 is interpreted as the most recent 3 + bytes written to the serial console. + + This field is a member of `oneof`_ ``_start``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + start = proto.Field( + proto.INT64, + number=109757538, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetServiceAttachmentRequest(proto.Message): + r"""A request message for ServiceAttachments.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region of this request. + service_attachment (str): + Name of the ServiceAttachment resource to + return. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + service_attachment = proto.Field( + proto.STRING, + number=338957549, + ) + + +class GetShieldedInstanceIdentityInstanceRequest(proto.Message): + r"""A request message for Instances.GetShieldedInstanceIdentity. + See the method description for details. + + Attributes: + instance (str): + Name or id of the instance scoping this + request. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetSnapshotRequest(proto.Message): + r"""A request message for Snapshots.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + snapshot (str): + Name of the Snapshot resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + snapshot = proto.Field( + proto.STRING, + number=284874180, + ) + + +class GetSslCertificateRequest(proto.Message): + r"""A request message for SslCertificates.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + ssl_certificate (str): + Name of the SslCertificate resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + ssl_certificate = proto.Field( + proto.STRING, + number=46443492, + ) + + +class GetSslPolicyRequest(proto.Message): + r"""A request message for SslPolicies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + ssl_policy (str): + Name of the SSL policy to update. The name + must be 1-63 characters long, and comply with + RFC1035. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + ssl_policy = proto.Field( + proto.STRING, + number=295190213, + ) + + +class GetStatusVpnGatewayRequest(proto.Message): + r"""A request message for VpnGateways.GetStatus. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + vpn_gateway (str): + Name of the VPN gateway to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + vpn_gateway = proto.Field( + proto.STRING, + number=406684153, + ) + + +class GetSubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + subnetwork (str): + Name of the Subnetwork resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + ) + + +class GetTargetGrpcProxyRequest(proto.Message): + r"""A request message for TargetGrpcProxies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + target_grpc_proxy (str): + Name of the TargetGrpcProxy resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + target_grpc_proxy = proto.Field( + proto.STRING, + number=5020283, + ) + + +class GetTargetHttpProxyRequest(proto.Message): + r"""A request message for TargetHttpProxies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + target_http_proxy (str): + Name of the TargetHttpProxy resource to + return. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + target_http_proxy = proto.Field( + proto.STRING, + number=206872421, + ) + + +class GetTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + target_https_proxy (str): + Name of the TargetHttpsProxy resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class GetTargetInstanceRequest(proto.Message): + r"""A request message for TargetInstances.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + target_instance (str): + Name of the TargetInstance resource to + return. + zone (str): + Name of the zone scoping this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + target_instance = proto.Field( + proto.STRING, + number=289769347, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + target_pool (str): + Name of the TargetPool resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + + +class GetTargetSslProxyRequest(proto.Message): + r"""A request message for TargetSslProxies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + target_ssl_proxy (str): + Name of the TargetSslProxy resource to + return. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + target_ssl_proxy = proto.Field( + proto.STRING, + number=338795853, + ) + + +class GetTargetTcpProxyRequest(proto.Message): + r"""A request message for TargetTcpProxies.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + target_tcp_proxy (str): + Name of the TargetTcpProxy resource to + return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + target_tcp_proxy = proto.Field( + proto.STRING, + number=503065442, + ) + + +class GetTargetVpnGatewayRequest(proto.Message): + r"""A request message for TargetVpnGateways.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + target_vpn_gateway (str): + Name of the target VPN gateway to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + target_vpn_gateway = proto.Field( + proto.STRING, + number=532512843, + ) + + +class GetUrlMapRequest(proto.Message): + r"""A request message for UrlMaps.Get. See the method description + for details. + + Attributes: + project (str): + Project ID for this request. + url_map (str): + Name of the UrlMap resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + + +class GetVpnGatewayRequest(proto.Message): + r"""A request message for VpnGateways.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + vpn_gateway (str): + Name of the VPN gateway to return. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + vpn_gateway = proto.Field( + proto.STRING, + number=406684153, + ) + + +class GetVpnTunnelRequest(proto.Message): + r"""A request message for VpnTunnels.Get. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + vpn_tunnel (str): + Name of the VpnTunnel resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + vpn_tunnel = proto.Field( + proto.STRING, + number=143821331, + ) + + +class GetXpnHostProjectRequest(proto.Message): + r"""A request message for Projects.GetXpnHost. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class GetXpnResourcesProjectsRequest(proto.Message): + r"""A request message for Projects.GetXpnResources. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. 
To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. 
+ + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class GetZoneOperationRequest(proto.Message): + r"""A request message for ZoneOperations.Get. See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to return. + project (str): + Project ID for this request. + zone (str): + Name of the zone for this request. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GetZoneRequest(proto.Message): + r"""A request message for Zones.Get. See the method description + for details. + + Attributes: + project (str): + Project ID for this request. + zone (str): + Name of the zone resource to return. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class GlobalNetworkEndpointGroupsAttachEndpointsRequest(proto.Message): + r""" + + Attributes: + network_endpoints (Sequence[google.cloud.compute_v1.types.NetworkEndpoint]): + The list of network endpoints to be attached. 
+ """ + + network_endpoints = proto.RepeatedField( + proto.MESSAGE, + number=149850285, + message='NetworkEndpoint', + ) + + +class GlobalNetworkEndpointGroupsDetachEndpointsRequest(proto.Message): + r""" + + Attributes: + network_endpoints (Sequence[google.cloud.compute_v1.types.NetworkEndpoint]): + The list of network endpoints to be detached. + """ + + network_endpoints = proto.RepeatedField( + proto.MESSAGE, + number=149850285, + message='NetworkEndpoint', + ) + + +class GlobalOrganizationSetPolicyRequest(proto.Message): + r""" + + Attributes: + bindings (Sequence[google.cloud.compute_v1.types.Binding]): + Flatten Policy to create a backward + compatible wire-format. Deprecated. Use 'policy' + to specify bindings. + etag (str): + Flatten Policy to create a backward + compatible wire-format. Deprecated. Use 'policy' + to specify the etag. + + This field is a member of `oneof`_ ``_etag``. + policy (google.cloud.compute_v1.types.Policy): + REQUIRED: The complete policy to be applied + to the 'resource'. The size of the policy is + limited to a few 10s of KB. An empty policy is + in general a valid policy but certain services + (like Projects) might reject them. + + This field is a member of `oneof`_ ``_policy``. + """ + + bindings = proto.RepeatedField( + proto.MESSAGE, + number=403251854, + message='Binding', + ) + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + policy = proto.Field( + proto.MESSAGE, + number=91071794, + optional=True, + message='Policy', + ) + + +class GlobalSetLabelsRequest(proto.Message): + r""" + + Attributes: + label_fingerprint (str): + The fingerprint of the previous set of labels + for this resource, used to detect conflicts. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash when updating or + changing labels, otherwise the request will fail + with error 412 conditionNotMet. 
Make a get() + request to the resource to get the latest + fingerprint. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.GlobalSetLabelsRequest.LabelsEntry]): + A list of labels to apply for this resource. Each label key + & value must comply with RFC1035. Specifically, the name + must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + For example, "webserver-frontend": "images". A label value + can also be empty (e.g. "my-label": ""). + """ + + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + + +class GlobalSetPolicyRequest(proto.Message): + r""" + + Attributes: + bindings (Sequence[google.cloud.compute_v1.types.Binding]): + Flatten Policy to create a backward + compatible wire-format. Deprecated. Use 'policy' + to specify bindings. + etag (str): + Flatten Policy to create a backward + compatible wire-format. Deprecated. Use 'policy' + to specify the etag. + + This field is a member of `oneof`_ ``_etag``. + policy (google.cloud.compute_v1.types.Policy): + REQUIRED: The complete policy to be applied + to the 'resource'. The size of the policy is + limited to a few 10s of KB. An empty policy is + in general a valid policy but certain services + (like Projects) might reject them. + + This field is a member of `oneof`_ ``_policy``. 
+ """ + + bindings = proto.RepeatedField( + proto.MESSAGE, + number=403251854, + message='Binding', + ) + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + policy = proto.Field( + proto.MESSAGE, + number=91071794, + optional=True, + message='Policy', + ) + + +class GuestAttributes(proto.Message): + r"""A guest attributes entry. + + Attributes: + kind (str): + [Output Only] Type of the resource. Always + compute#guestAttributes for guest attributes entry. + + This field is a member of `oneof`_ ``_kind``. + query_path (str): + The path to be queried. This can be the + default namespace ('') or a nested namespace + ('\/') or a specified key ('\/\'). + + This field is a member of `oneof`_ ``_query_path``. + query_value (google.cloud.compute_v1.types.GuestAttributesValue): + [Output Only] The value of the requested queried path. + + This field is a member of `oneof`_ ``_query_value``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + variable_key (str): + The key to search for. + + This field is a member of `oneof`_ ``_variable_key``. + variable_value (str): + [Output Only] The value found for the requested key. + + This field is a member of `oneof`_ ``_variable_value``. + """ + + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + query_path = proto.Field( + proto.STRING, + number=368591164, + optional=True, + ) + query_value = proto.Field( + proto.MESSAGE, + number=157570874, + optional=True, + message='GuestAttributesValue', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + variable_key = proto.Field( + proto.STRING, + number=164364828, + optional=True, + ) + variable_value = proto.Field( + proto.STRING, + number=124582382, + optional=True, + ) + + +class GuestAttributesEntry(proto.Message): + r"""A guest attributes namespace/key/value entry. 
+ + Attributes: + key (str): + Key for the guest attribute entry. + + This field is a member of `oneof`_ ``_key``. + namespace (str): + Namespace for the guest attribute entry. + + This field is a member of `oneof`_ ``_namespace``. + value (str): + Value for the guest attribute entry. + + This field is a member of `oneof`_ ``_value``. + """ + + key = proto.Field( + proto.STRING, + number=106079, + optional=True, + ) + namespace = proto.Field( + proto.STRING, + number=178476379, + optional=True, + ) + value = proto.Field( + proto.STRING, + number=111972721, + optional=True, + ) + + +class GuestAttributesValue(proto.Message): + r"""Array of guest attribute namespace/key/value tuples. + + Attributes: + items (Sequence[google.cloud.compute_v1.types.GuestAttributesEntry]): + + """ + + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='GuestAttributesEntry', + ) + + +class GuestOsFeature(proto.Message): + r"""Guest OS features. + + Attributes: + type_ (str): + The ID of a supported feature. Read Enabling + guest operating system features to see a list of + available options. Check the Type enum for the + list of possible values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""The ID of a supported feature. Read Enabling guest operating + system features to see a list of available options. + """ + UNDEFINED_TYPE = 0 + FEATURE_TYPE_UNSPECIFIED = 531767259 + GVNIC = 68209305 + MULTI_IP_SUBNET = 151776719 + SECURE_BOOT = 376811194 + SEV_CAPABLE = 87083793 + UEFI_COMPATIBLE = 195865408 + VIRTIO_SCSI_MULTIQUEUE = 201597069 + WINDOWS = 456863331 + + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class HTTP2HealthCheck(proto.Message): + r""" + + Attributes: + host (str): + The value of the host header in the HTTP/2 + health check request. If left empty (default + value), the IP on behalf of which this health + check is performed will be used. 
+ + This field is a member of `oneof`_ ``_host``. + port (int): + The TCP port number for the health check + request. The default value is 443. Valid values + are 1 through 65535. + + This field is a member of `oneof`_ ``_port``. + port_name (str): + Port name as defined in InstanceGroup#NamedPort#name. If + both port and port_name are defined, port takes precedence. + + This field is a member of `oneof`_ ``_port_name``. + port_specification (str): + Specifies how port is selected for health checking, can be + one of following values: USE_FIXED_PORT: The port number in + port is used for health checking. USE_NAMED_PORT: The + portName is used for health checking. USE_SERVING_PORT: For + NetworkEndpointGroup, the port specified for each network + endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is + used for health checking. If not specified, HTTP2 health + check follows behavior specified in port and portName + fields. Check the PortSpecification enum for the list of + possible values. + + This field is a member of `oneof`_ ``_port_specification``. + proxy_header (str): + Specifies the type of proxy header to append before sending + data to the backend, either NONE or PROXY_V1. The default is + NONE. Check the ProxyHeader enum for the list of possible + values. + + This field is a member of `oneof`_ ``_proxy_header``. + request_path (str): + The request path of the HTTP/2 health check + request. The default value is /. + + This field is a member of `oneof`_ ``_request_path``. + response (str): + The string to match anywhere in the first + 1024 bytes of the response body. If left empty + (the default value), the status code determines + health. The response data can only be ASCII. + + This field is a member of `oneof`_ ``_response``. 
+ """ + class PortSpecification(proto.Enum): + r"""Specifies how port is selected for health checking, can be one of + following values: USE_FIXED_PORT: The port number in port is used + for health checking. USE_NAMED_PORT: The portName is used for health + checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port + specified for each network endpoint is used for health checking. For + other backends, the port or named port specified in the Backend + Service is used for health checking. If not specified, HTTP2 health + check follows behavior specified in port and portName fields. + """ + UNDEFINED_PORT_SPECIFICATION = 0 + USE_FIXED_PORT = 190235748 + USE_NAMED_PORT = 349300671 + USE_SERVING_PORT = 362637516 + + class ProxyHeader(proto.Enum): + r"""Specifies the type of proxy header to append before sending data to + the backend, either NONE or PROXY_V1. The default is NONE. + """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + host = proto.Field( + proto.STRING, + number=3208616, + optional=True, + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + port_specification = proto.Field( + proto.STRING, + number=51590597, + optional=True, + ) + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + request_path = proto.Field( + proto.STRING, + number=229403605, + optional=True, + ) + response = proto.Field( + proto.STRING, + number=196547649, + optional=True, + ) + + +class HTTPHealthCheck(proto.Message): + r""" + + Attributes: + host (str): + The value of the host header in the HTTP + health check request. If left empty (default + value), the IP on behalf of which this health + check is performed will be used. + + This field is a member of `oneof`_ ``_host``. + port (int): + The TCP port number for the health check + request. The default value is 80. Valid values + are 1 through 65535. 
+ + This field is a member of `oneof`_ ``_port``. + port_name (str): + Port name as defined in InstanceGroup#NamedPort#name. If + both port and port_name are defined, port takes precedence. + + This field is a member of `oneof`_ ``_port_name``. + port_specification (str): + Specifies how port is selected for health checking, can be + one of following values: USE_FIXED_PORT: The port number in + port is used for health checking. USE_NAMED_PORT: The + portName is used for health checking. USE_SERVING_PORT: For + NetworkEndpointGroup, the port specified for each network + endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is + used for health checking. If not specified, HTTP health + check follows behavior specified in port and portName + fields. Check the PortSpecification enum for the list of + possible values. + + This field is a member of `oneof`_ ``_port_specification``. + proxy_header (str): + Specifies the type of proxy header to append before sending + data to the backend, either NONE or PROXY_V1. The default is + NONE. Check the ProxyHeader enum for the list of possible + values. + + This field is a member of `oneof`_ ``_proxy_header``. + request_path (str): + The request path of the HTTP health check + request. The default value is /. + + This field is a member of `oneof`_ ``_request_path``. + response (str): + The string to match anywhere in the first + 1024 bytes of the response body. If left empty + (the default value), the status code determines + health. The response data can only be ASCII. + + This field is a member of `oneof`_ ``_response``. + """ + class PortSpecification(proto.Enum): + r"""Specifies how port is selected for health checking, can be one of + following values: USE_FIXED_PORT: The port number in port is used + for health checking. USE_NAMED_PORT: The portName is used for health + checking. 
USE_SERVING_PORT: For NetworkEndpointGroup, the port + specified for each network endpoint is used for health checking. For + other backends, the port or named port specified in the Backend + Service is used for health checking. If not specified, HTTP health + check follows behavior specified in port and portName fields. + """ + UNDEFINED_PORT_SPECIFICATION = 0 + USE_FIXED_PORT = 190235748 + USE_NAMED_PORT = 349300671 + USE_SERVING_PORT = 362637516 + + class ProxyHeader(proto.Enum): + r"""Specifies the type of proxy header to append before sending data to + the backend, either NONE or PROXY_V1. The default is NONE. + """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + host = proto.Field( + proto.STRING, + number=3208616, + optional=True, + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + port_specification = proto.Field( + proto.STRING, + number=51590597, + optional=True, + ) + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + request_path = proto.Field( + proto.STRING, + number=229403605, + optional=True, + ) + response = proto.Field( + proto.STRING, + number=196547649, + optional=True, + ) + + +class HTTPSHealthCheck(proto.Message): + r""" + + Attributes: + host (str): + The value of the host header in the HTTPS + health check request. If left empty (default + value), the IP on behalf of which this health + check is performed will be used. + + This field is a member of `oneof`_ ``_host``. + port (int): + The TCP port number for the health check + request. The default value is 443. Valid values + are 1 through 65535. + + This field is a member of `oneof`_ ``_port``. + port_name (str): + Port name as defined in InstanceGroup#NamedPort#name. If + both port and port_name are defined, port takes precedence. + + This field is a member of `oneof`_ ``_port_name``. 
+ port_specification (str): + Specifies how port is selected for health checking, can be + one of following values: USE_FIXED_PORT: The port number in + port is used for health checking. USE_NAMED_PORT: The + portName is used for health checking. USE_SERVING_PORT: For + NetworkEndpointGroup, the port specified for each network + endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is + used for health checking. If not specified, HTTPS health + check follows behavior specified in port and portName + fields. Check the PortSpecification enum for the list of + possible values. + + This field is a member of `oneof`_ ``_port_specification``. + proxy_header (str): + Specifies the type of proxy header to append before sending + data to the backend, either NONE or PROXY_V1. The default is + NONE. Check the ProxyHeader enum for the list of possible + values. + + This field is a member of `oneof`_ ``_proxy_header``. + request_path (str): + The request path of the HTTPS health check + request. The default value is /. + + This field is a member of `oneof`_ ``_request_path``. + response (str): + The string to match anywhere in the first + 1024 bytes of the response body. If left empty + (the default value), the status code determines + health. The response data can only be ASCII. + + This field is a member of `oneof`_ ``_response``. + """ + class PortSpecification(proto.Enum): + r"""Specifies how port is selected for health checking, can be one of + following values: USE_FIXED_PORT: The port number in port is used + for health checking. USE_NAMED_PORT: The portName is used for health + checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port + specified for each network endpoint is used for health checking. For + other backends, the port or named port specified in the Backend + Service is used for health checking. If not specified, HTTPS health + check follows behavior specified in port and portName fields. 
+ """ + UNDEFINED_PORT_SPECIFICATION = 0 + USE_FIXED_PORT = 190235748 + USE_NAMED_PORT = 349300671 + USE_SERVING_PORT = 362637516 + + class ProxyHeader(proto.Enum): + r"""Specifies the type of proxy header to append before sending data to + the backend, either NONE or PROXY_V1. The default is NONE. + """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + host = proto.Field( + proto.STRING, + number=3208616, + optional=True, + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + port_specification = proto.Field( + proto.STRING, + number=51590597, + optional=True, + ) + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + request_path = proto.Field( + proto.STRING, + number=229403605, + optional=True, + ) + response = proto.Field( + proto.STRING, + number=196547649, + optional=True, + ) + + +class HealthCheck(proto.Message): + r"""Represents a Health Check resource. Google Compute Engine has two + Health Check resources: \* + `Global `__ \* + `Regional `__ + Internal HTTP(S) load balancers must use regional health checks + (``compute.v1.regionHealthChecks``). Traffic Director must use + global health checks (``compute.v1.HealthChecks``). Internal TCP/UDP + load balancers can use either regional or global health checks + (``compute.v1.regionHealthChecks`` or ``compute.v1.HealthChecks``). + External HTTP(S), TCP proxy, and SSL proxy load balancers as well as + managed instance group auto-healing must use global health checks + (``compute.v1.HealthChecks``). Backend service-based network load + balancers must use regional health checks + (``compute.v1.regionHealthChecks``). Target pool-based network load + balancers must use legacy HTTP health checks + (``compute.v1.httpHealthChecks``). For more information, see Health + checks overview. 
+
+    Attributes:
+        check_interval_sec (int):
+            How often (in seconds) to send a health
+            check. The default value is 5 seconds.
+
+            This field is a member of `oneof`_ ``_check_interval_sec``.
+        creation_timestamp (str):
+            [Output Only] Creation timestamp in RFC3339 text format.
+
+            This field is a member of `oneof`_ ``_creation_timestamp``.
+        description (str):
+            An optional description of this resource.
+            Provide this property when you create the
+            resource.
+
+            This field is a member of `oneof`_ ``_description``.
+        grpc_health_check (google.cloud.compute_v1.types.GRPCHealthCheck):
+
+            This field is a member of `oneof`_ ``_grpc_health_check``.
+        healthy_threshold (int):
+            A so-far unhealthy instance will be marked
+            healthy after this many consecutive successes.
+            The default value is 2.
+
+            This field is a member of `oneof`_ ``_healthy_threshold``.
+        http2_health_check (google.cloud.compute_v1.types.HTTP2HealthCheck):
+
+            This field is a member of `oneof`_ ``_http2_health_check``.
+        http_health_check (google.cloud.compute_v1.types.HTTPHealthCheck):
+
+            This field is a member of `oneof`_ ``_http_health_check``.
+        https_health_check (google.cloud.compute_v1.types.HTTPSHealthCheck):
+
+            This field is a member of `oneof`_ ``_https_health_check``.
+        id (int):
+            [Output Only] The unique identifier for the resource. This
+            identifier is defined by the server.
+
+            This field is a member of `oneof`_ ``_id``.
+        kind (str):
+            Type of the resource.
+
+            This field is a member of `oneof`_ ``_kind``.
+        log_config (google.cloud.compute_v1.types.HealthCheckLogConfig):
+            Configure logging on this health check.
+
+            This field is a member of `oneof`_ ``_log_config``.
+        name (str):
+            Name of the resource. Provided by the client when the
+            resource is created. The name must be 1-63 characters long,
+            and comply with RFC1035. For example, a name that is 1-63
+            characters long, matches the regular expression
+            ``[a-z]([-a-z0-9]*[a-z0-9])?``, and otherwise complies with
+            RFC1035. This regular expression describes a name where the
+            first character is a lowercase letter, and all following
+            characters are a dash, lowercase letter, or digit, except
+            the last character, which isn't a dash.
+
+            This field is a member of `oneof`_ ``_name``.
+        region (str):
+            [Output Only] Region where the health check resides. Not
+            applicable to global health checks.
+
+            This field is a member of `oneof`_ ``_region``.
+        self_link (str):
+            [Output Only] Server-defined URL for the resource.
+
+            This field is a member of `oneof`_ ``_self_link``.
+        ssl_health_check (google.cloud.compute_v1.types.SSLHealthCheck):
+
+            This field is a member of `oneof`_ ``_ssl_health_check``.
+        tcp_health_check (google.cloud.compute_v1.types.TCPHealthCheck):
+
+            This field is a member of `oneof`_ ``_tcp_health_check``.
+        timeout_sec (int):
+            How long (in seconds) to wait before claiming
+            failure. The default value is 5 seconds. It is
+            invalid for timeoutSec to have greater value
+            than checkIntervalSec.
+
+            This field is a member of `oneof`_ ``_timeout_sec``.
+        type_ (str):
+            Specifies the type of the healthCheck, either
+            TCP, SSL, HTTP, HTTPS or HTTP2. If not
+            specified, the default is TCP. Exactly one of
+            the protocol-specific health check field must be
+            specified, which must match type field. Check
+            the Type enum for the list of possible values.
+
+            This field is a member of `oneof`_ ``_type``.
+        unhealthy_threshold (int):
+            A so-far healthy instance will be marked
+            unhealthy after this many consecutive failures.
+            The default value is 2.
+
+            This field is a member of `oneof`_ ``_unhealthy_threshold``.
+    """
+    class Type(proto.Enum):
+        r"""Specifies the type of the healthCheck, either TCP, SSL, HTTP,
+        HTTPS or HTTP2. If not specified, the default is TCP. Exactly
+        one of the protocol-specific health check field must be
+        specified, which must match type field.
+ """ + UNDEFINED_TYPE = 0 + GRPC = 2196510 + HTTP = 2228360 + HTTP2 = 69079210 + HTTPS = 69079243 + INVALID = 530283991 + SSL = 82412 + TCP = 82881 + + check_interval_sec = proto.Field( + proto.INT32, + number=345561006, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + grpc_health_check = proto.Field( + proto.MESSAGE, + number=85529574, + optional=True, + message='GRPCHealthCheck', + ) + healthy_threshold = proto.Field( + proto.INT32, + number=403212361, + optional=True, + ) + http2_health_check = proto.Field( + proto.MESSAGE, + number=11360986, + optional=True, + message='HTTP2HealthCheck', + ) + http_health_check = proto.Field( + proto.MESSAGE, + number=412586940, + optional=True, + message='HTTPHealthCheck', + ) + https_health_check = proto.Field( + proto.MESSAGE, + number=436046905, + optional=True, + message='HTTPSHealthCheck', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + log_config = proto.Field( + proto.MESSAGE, + number=351299741, + optional=True, + message='HealthCheckLogConfig', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + ssl_health_check = proto.Field( + proto.MESSAGE, + number=280032440, + optional=True, + message='SSLHealthCheck', + ) + tcp_health_check = proto.Field( + proto.MESSAGE, + number=469980419, + optional=True, + message='TCPHealthCheck', + ) + timeout_sec = proto.Field( + proto.INT32, + number=79994995, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + unhealthy_threshold = proto.Field( + proto.INT32, + number=227958480, + 
optional=True, + ) + + +class HealthCheckList(proto.Message): + r"""Contains a list of HealthCheck resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.HealthCheck]): + A list of HealthCheck resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='HealthCheck', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class HealthCheckLogConfig(proto.Message): + r"""Configuration of logging on a health check. If logging is + enabled, logs will be exported to Stackdriver. + + Attributes: + enable (bool): + Indicates whether or not to export logs. 
This + is false by default, which means no health check + logging will be done. + + This field is a member of `oneof`_ ``_enable``. + """ + + enable = proto.Field( + proto.BOOL, + number=311764355, + optional=True, + ) + + +class HealthCheckReference(proto.Message): + r"""A full or valid partial URL to a health check. For example, + the following are valid URLs: - + https://www.googleapis.com/compute/beta/projects/project- + id/global/httpHealthChecks/health-check - projects/project- + id/global/httpHealthChecks/health-check - + global/httpHealthChecks/health-check + + Attributes: + health_check (str): + + This field is a member of `oneof`_ ``_health_check``. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + optional=True, + ) + + +class HealthCheckService(proto.Message): + r"""Represents a Health-Check as a Service resource. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a HealthCheckService. An + up-to-date fingerprint must be provided in order + to patch/update the HealthCheckService; + Otherwise, the request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve the + HealthCheckService. + + This field is a member of `oneof`_ ``_fingerprint``. + health_checks (Sequence[str]): + A list of URLs to the HealthCheck resources. Must have at + least one HealthCheck, and not more than 10. HealthCheck + resources must have portSpecification=USE_SERVING_PORT or + portSpecification=USE_FIXED_PORT. 
For regional + HealthCheckService, the HealthCheck must be regional and in + the same region. For global HealthCheckService, HealthCheck + must be global. Mix of regional and global HealthChecks is + not supported. Multiple regional HealthChecks must belong to + the same region. Regional HealthChecks must belong to the + same region as zones of NEGs. + health_status_aggregation_policy (str): + Optional. Policy for how the results from multiple health + checks for the same endpoint are aggregated. Defaults to + NO_AGGREGATION if unspecified. - NO_AGGREGATION. An + EndpointHealth message is returned for each pair in the + health check service. - AND. If any health check of an + endpoint reports UNHEALTHY, then UNHEALTHY is the + HealthState of the endpoint. If all health checks report + HEALTHY, the HealthState of the endpoint is HEALTHY. . Check + the HealthStatusAggregationPolicy enum for the list of + possible values. + + This field is a member of `oneof`_ ``_health_status_aggregation_policy``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output only] Type of the resource. Always + compute#healthCheckServicefor health check services. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network_endpoint_groups (Sequence[str]): + A list of URLs to the NetworkEndpointGroup + resources. Must not have more than 100. 
For + regional HealthCheckService, NEGs must be in + zones in the region of the HealthCheckService. + notification_endpoints (Sequence[str]): + A list of URLs to the NotificationEndpoint + resources. Must not have more than 10. A list of + endpoints for receiving notifications of change + in health status. For regional + HealthCheckService, NotificationEndpoint must be + regional and in the same region. For global + HealthCheckService, NotificationEndpoint must be + global. + region (str): + [Output Only] URL of the region where the health check + service resides. This field is not applicable to global + health check services. You must specify this field as part + of the HTTP request URL. It is not settable as a field in + the request body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + """ + class HealthStatusAggregationPolicy(proto.Enum): + r"""Optional. Policy for how the results from multiple health checks for + the same endpoint are aggregated. Defaults to NO_AGGREGATION if + unspecified. - NO_AGGREGATION. An EndpointHealth message is returned + for each pair in the health check service. - AND. If any health + check of an endpoint reports UNHEALTHY, then UNHEALTHY is the + HealthState of the endpoint. If all health checks report HEALTHY, + the HealthState of the endpoint is HEALTHY. . 
+ """ + UNDEFINED_HEALTH_STATUS_AGGREGATION_POLICY = 0 + AND = 64951 + NO_AGGREGATION = 426445124 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + health_checks = proto.RepeatedField( + proto.STRING, + number=448370606, + ) + health_status_aggregation_policy = proto.Field( + proto.STRING, + number=253163129, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network_endpoint_groups = proto.RepeatedField( + proto.STRING, + number=29346733, + ) + notification_endpoints = proto.RepeatedField( + proto.STRING, + number=406728490, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + + +class HealthCheckServiceReference(proto.Message): + r"""A full or valid partial URL to a health check service. For + example, the following are valid URLs: - + https://www.googleapis.com/compute/beta/projects/project- + id/regions/us-west1/healthCheckServices/health-check-service - + projects/project-id/regions/us-west1/healthCheckServices/health- + check-service - regions/us-west1/healthCheckServices/health- + check-service + + Attributes: + health_check_service (str): + + This field is a member of `oneof`_ ``_health_check_service``. + """ + + health_check_service = proto.Field( + proto.STRING, + number=408374747, + optional=True, + ) + + +class HealthCheckServicesList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.HealthCheckService]): + A list of HealthCheckService resources. + kind (str): + [Output Only] Type of the resource. Always + compute#healthCheckServicesList for lists of + HealthCheckServices. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='HealthCheckService', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class HealthChecksAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.HealthChecksAggregatedList.ItemsEntry]): + A list of HealthChecksScopedList resources. + kind (str): + Type of resource. 
+ + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='HealthChecksScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class HealthChecksScopedList(proto.Message): + r""" + + Attributes: + health_checks (Sequence[google.cloud.compute_v1.types.HealthCheck]): + A list of HealthChecks contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of backend services when the list is empty. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + health_checks = proto.RepeatedField( + proto.MESSAGE, + number=448370606, + message='HealthCheck', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class HealthStatus(proto.Message): + r""" + + Attributes: + annotations (Sequence[google.cloud.compute_v1.types.HealthStatus.AnnotationsEntry]): + Metadata defined as annotations for network + endpoint. + forwarding_rule (str): + URL of the forwarding rule associated with + the health status of the instance. + + This field is a member of `oneof`_ ``_forwarding_rule``. + forwarding_rule_ip (str): + A forwarding rule IP address assigned to this + instance. + + This field is a member of `oneof`_ ``_forwarding_rule_ip``. + health_state (str): + Health state of the instance. + Check the HealthState enum for the list of + possible values. + + This field is a member of `oneof`_ ``_health_state``. + instance (str): + URL of the instance resource. + + This field is a member of `oneof`_ ``_instance``. + ip_address (str): + For target pool based Network Load Balancing, + it indicates the forwarding rule's IP address + assigned to this instance. For other types of + load balancing, the field indicates VM internal + ip. + + This field is a member of `oneof`_ ``_ip_address``. + port (int): + The named port of the instance group, not + necessarily the port that is health-checked. + + This field is a member of `oneof`_ ``_port``. + weight (str): + + This field is a member of `oneof`_ ``_weight``. + weight_error (str): + Check the WeightError enum for the list of + possible values. + + This field is a member of `oneof`_ ``_weight_error``. 
+    """
+    class HealthState(proto.Enum):
+        r"""Health state of the instance."""
+        UNDEFINED_HEALTH_STATE = 0
+        HEALTHY = 439801213
+        UNHEALTHY = 462118084
+
+    class WeightError(proto.Enum):
+        r"""Error conditions for the reported backend ``weight`` (see the
+        ``weight_error`` field).
+        """
+        UNDEFINED_WEIGHT_ERROR = 0
+        INVALID_WEIGHT = 383698400
+        MISSING_WEIGHT = 384027537
+        UNAVAILABLE_WEIGHT = 439464295
+        WEIGHT_NONE = 502428831
+
+    annotations = proto.MapField(
+        proto.STRING,
+        proto.STRING,
+        number=112032548,
+    )
+    forwarding_rule = proto.Field(
+        proto.STRING,
+        number=269964030,
+        optional=True,
+    )
+    forwarding_rule_ip = proto.Field(
+        proto.STRING,
+        number=172250632,
+        optional=True,
+    )
+    health_state = proto.Field(
+        proto.STRING,
+        number=324007150,
+        optional=True,
+    )
+    instance = proto.Field(
+        proto.STRING,
+        number=18257045,
+        optional=True,
+    )
+    ip_address = proto.Field(
+        proto.STRING,
+        number=406272220,
+        optional=True,
+    )
+    port = proto.Field(
+        proto.INT32,
+        number=3446913,
+        optional=True,
+    )
+    weight = proto.Field(
+        proto.STRING,
+        number=282149496,
+        optional=True,
+    )
+    weight_error = proto.Field(
+        proto.STRING,
+        number=522501505,
+        optional=True,
+    )
+
+
+class HealthStatusForNetworkEndpoint(proto.Message):
+    r"""Health state of a single network endpoint.
+
+    Attributes:
+        backend_service (google.cloud.compute_v1.types.BackendServiceReference):
+            URL of the backend service associated with
+            the health state of the network endpoint.
+
+            This field is a member of `oneof`_ ``_backend_service``.
+        forwarding_rule (google.cloud.compute_v1.types.ForwardingRuleReference):
+            URL of the forwarding rule associated with
+            the health state of the network endpoint.
+
+            This field is a member of `oneof`_ ``_forwarding_rule``.
+        health_check (google.cloud.compute_v1.types.HealthCheckReference):
+            URL of the health check associated with the
+            health state of the network endpoint.
+
+            This field is a member of `oneof`_ ``_health_check``.
+        health_check_service (google.cloud.compute_v1.types.HealthCheckServiceReference):
+            URL of the health check service associated
+            with the health state of the network endpoint.
+
+            This field is a member of `oneof`_ ``_health_check_service``.
+        health_state (str):
+            Health state of the network endpoint
+            determined based on the health checks
+            configured. Check the HealthState enum for the
+            list of possible values.
+
+            This field is a member of `oneof`_ ``_health_state``.
+    """
+    class HealthState(proto.Enum):
+        r"""Health state of the network endpoint determined based on the
+        health checks configured.
+        """
+        UNDEFINED_HEALTH_STATE = 0
+        DRAINING = 480455402
+        HEALTHY = 439801213
+        UNHEALTHY = 462118084
+        UNKNOWN = 433141802
+
+    backend_service = proto.Field(
+        proto.MESSAGE,
+        number=306946058,
+        optional=True,
+        message='BackendServiceReference',
+    )
+    forwarding_rule = proto.Field(
+        proto.MESSAGE,
+        number=269964030,
+        optional=True,
+        message='ForwardingRuleReference',
+    )
+    health_check = proto.Field(
+        proto.MESSAGE,
+        number=308876645,
+        optional=True,
+        message='HealthCheckReference',
+    )
+    health_check_service = proto.Field(
+        proto.MESSAGE,
+        number=408374747,
+        optional=True,
+        message='HealthCheckServiceReference',
+    )
+    health_state = proto.Field(
+        proto.STRING,
+        number=324007150,
+        optional=True,
+    )
+
+
+class HostRule(proto.Message):
+    r"""UrlMaps A host-matching rule for a URL. If matched, will use
+    the named PathMatcher to select the BackendService.
+
+    Attributes:
+        description (str):
+            An optional description of this resource.
+            Provide this property when you create the
+            resource.
+
+            This field is a member of `oneof`_ ``_description``.
+        hosts (Sequence[str]):
+            The list of host patterns to match. They must be valid
+            hostnames with optional port numbers in the format
+            host:port. \* matches any string of ([a-z0-9-.]*). In that
+            case, \* must be the first character and must be followed in
+            the pattern by either - or .. \* based matching is not
+            supported when the URL map is bound to target gRPC proxy
+            that has validateForProxyless field set to true.
+        path_matcher (str):
+            The name of the PathMatcher to use to match
+            the path portion of the URL if the hostRule
+            matches the URL's host portion.
+
+            This field is a member of `oneof`_ ``_path_matcher``.
+    """
+
+    description = proto.Field(
+        proto.STRING,
+        number=422937596,
+        optional=True,
+    )
+    hosts = proto.RepeatedField(
+        proto.STRING,
+        number=99467211,
+    )
+    path_matcher = proto.Field(
+        proto.STRING,
+        number=337813272,
+        optional=True,
+    )
+
+
+class HttpFaultAbort(proto.Message):
+    r"""Specification for how requests are aborted as part of fault
+    injection.
+
+    Attributes:
+        http_status (int):
+            The HTTP status code used to abort the
+            request. The value must be between 200 and 599
+            inclusive. For gRPC protocol, the gRPC status
+            code is mapped to HTTP status code according to
+            this mapping table. HTTP status 200 is mapped to
+            gRPC status UNKNOWN. Injecting an OK status is
+            currently not supported by Traffic Director.
+
+            This field is a member of `oneof`_ ``_http_status``.
+        percentage (float):
+            The percentage of traffic
+            (connections/operations/requests) which will be
+            aborted as part of fault injection. The value
+            must be between 0.0 and 100.0 inclusive.
+
+            This field is a member of `oneof`_ ``_percentage``.
+    """
+
+    http_status = proto.Field(
+        proto.UINT32,
+        number=468949897,
+        optional=True,
+    )
+    percentage = proto.Field(
+        proto.DOUBLE,
+        number=151909018,
+        optional=True,
+    )
+
+
+class HttpFaultDelay(proto.Message):
+    r"""Specifies the delay introduced by Loadbalancer before
+    forwarding the request to the backend service as part of fault
+    injection.
+
+    Attributes:
+        fixed_delay (google.cloud.compute_v1.types.Duration):
+            Specifies the value of the fixed delay
+            interval.
+
+            This field is a member of `oneof`_ ``_fixed_delay``.
+        percentage (float):
+            The percentage of traffic
+            (connections/operations/requests) on which delay
+            will be introduced as part of fault injection.
+            The value must be between 0.0 and 100.0
+            inclusive.
+
+            This field is a member of `oneof`_ ``_percentage``.
+    """
+
+    fixed_delay = proto.Field(
+        proto.MESSAGE,
+        number=317037816,
+        optional=True,
+        message='Duration',
+    )
+    percentage = proto.Field(
+        proto.DOUBLE,
+        number=151909018,
+        optional=True,
+    )
+
+
+class HttpFaultInjection(proto.Message):
+    r"""The specification for fault injection introduced into traffic
+    to test the resiliency of clients to backend service failure. As
+    part of fault injection, when clients send requests to a backend
+    service, delays can be introduced by Loadbalancer on a
+    percentage of requests before sending those request to the
+    backend service. Similarly requests from clients can be aborted
+    by the Loadbalancer for a percentage of requests.
+
+    Attributes:
+        abort (google.cloud.compute_v1.types.HttpFaultAbort):
+            The specification for how client requests are
+            aborted as part of fault injection.
+
+            This field is a member of `oneof`_ ``_abort``.
+        delay (google.cloud.compute_v1.types.HttpFaultDelay):
+            The specification for how client requests are
+            delayed as part of fault injection, before being
+            sent to a backend service.
+
+            This field is a member of `oneof`_ ``_delay``.
+    """
+
+    abort = proto.Field(
+        proto.MESSAGE,
+        number=92611376,
+        optional=True,
+        message='HttpFaultAbort',
+    )
+    delay = proto.Field(
+        proto.MESSAGE,
+        number=95467907,
+        optional=True,
+        message='HttpFaultDelay',
+    )
+
+
+class HttpHeaderAction(proto.Message):
+    r"""The request and response header transformations that take
+    effect before the request is passed along to the selected
+    backendService.
+
+    Attributes:
+        request_headers_to_add (Sequence[google.cloud.compute_v1.types.HttpHeaderOption]):
+            Headers to add to a matching request prior to
+            forwarding the request to the backendService.
+        request_headers_to_remove (Sequence[str]):
+            A list of header names for headers that need
+            to be removed from the request prior to
+            forwarding the request to the backendService.
+        response_headers_to_add (Sequence[google.cloud.compute_v1.types.HttpHeaderOption]):
+            Headers to add to the response prior to sending
+            the response back to the client.
+        response_headers_to_remove (Sequence[str]):
+            A list of header names for headers that need
+            to be removed from the response prior to sending
+            the response back to the client.
+    """
+
+    request_headers_to_add = proto.RepeatedField(
+        proto.MESSAGE,
+        number=72111974,
+        message='HttpHeaderOption',
+    )
+    request_headers_to_remove = proto.RepeatedField(
+        proto.STRING,
+        number=218425247,
+    )
+    response_headers_to_add = proto.RepeatedField(
+        proto.MESSAGE,
+        number=32136052,
+        message='HttpHeaderOption',
+    )
+    response_headers_to_remove = proto.RepeatedField(
+        proto.STRING,
+        number=75415761,
+    )
+
+
+class HttpHeaderMatch(proto.Message):
+    r"""matchRule criteria for request header matches.
+
+    Attributes:
+        exact_match (str):
+            The value should exactly match contents of
+            exactMatch. Only one of exactMatch, prefixMatch,
+            suffixMatch, regexMatch, presentMatch or
+            rangeMatch must be set.
+
+            This field is a member of `oneof`_ ``_exact_match``.
+        header_name (str):
+            The name of the HTTP header to match. For matching against
+            the HTTP request's authority, use a headerMatch with the
+            header name ":authority". For matching a request's method,
+            use the headerName ":method". When the URL map is bound to
+            target gRPC proxy that has validateForProxyless field set to
+            true, only non-binary user-specified custom metadata and the
+            ``content-type`` header are supported. The following
+            transport-level headers cannot be used in header matching
+            rules: ``:authority``, ``:method``, ``:path``, ``:scheme``,
+            ``user-agent``, ``accept-encoding``, ``content-encoding``,
+            ``grpc-accept-encoding``, ``grpc-encoding``,
+            ``grpc-previous-rpc-attempts``, ``grpc-tags-bin``,
+            ``grpc-timeout`` and ``grpc-trace-bin``.
+
+            This field is a member of `oneof`_ ``_header_name``.
+        invert_match (bool):
+            If set to false, the headerMatch is
+            considered a match if the match criteria above
+            are met. If set to true, the headerMatch is
+            considered a match if the match criteria above
+            are NOT met. The default setting is false.
+
+            This field is a member of `oneof`_ ``_invert_match``.
+        prefix_match (str):
+            The value of the header must start with the
+            contents of prefixMatch. Only one of exactMatch,
+            prefixMatch, suffixMatch, regexMatch,
+            presentMatch or rangeMatch must be set.
+
+            This field is a member of `oneof`_ ``_prefix_match``.
+        present_match (bool):
+            A header with the contents of headerName must
+            exist. The match takes place whether or not the
+            request's header has a value. Only one of
+            exactMatch, prefixMatch, suffixMatch,
+            regexMatch, presentMatch or rangeMatch must be
+            set.
+
+            This field is a member of `oneof`_ ``_present_match``.
+        range_match (google.cloud.compute_v1.types.Int64RangeMatch):
+            The header value must be an integer and its value must be in
+            the range specified in rangeMatch. If the header does not
+            contain an integer, number or is empty, the match fails. For
+            example for a range [-5, 0] - -3 will match. - 0 will not
+            match. - 0.25 will not match. - -3someString will not match.
+            Only one of exactMatch, prefixMatch, suffixMatch,
+            regexMatch, presentMatch or rangeMatch must be set. Note
+            that rangeMatch is not supported for Loadbalancers that have
+            their loadBalancingScheme set to EXTERNAL.
+
+            This field is a member of `oneof`_ ``_range_match``.
+        regex_match (str):
+            The value of the header must match the regular expression
+            specified in regexMatch. For regular expression grammar,
+            please see: github.com/google/re2/wiki/Syntax For matching
+            against a port specified in the HTTP request, use a
+            headerMatch with headerName set to PORT and a regular
+            expression that satisfies the RFC2616 Host header's port
+            specifier. Only one of exactMatch, prefixMatch, suffixMatch,
+            regexMatch, presentMatch or rangeMatch must be set. Note
+            that regexMatch only applies to Loadbalancers that have
+            their loadBalancingScheme set to INTERNAL_SELF_MANAGED.
+
+            This field is a member of `oneof`_ ``_regex_match``.
+        suffix_match (str):
+            The value of the header must end with the
+            contents of suffixMatch. Only one of exactMatch,
+            prefixMatch, suffixMatch, regexMatch,
+            presentMatch or rangeMatch must be set.
+
+            This field is a member of `oneof`_ ``_suffix_match``.
+    """
+
+    exact_match = proto.Field(
+        proto.STRING,
+        number=457641093,
+        optional=True,
+    )
+    header_name = proto.Field(
+        proto.STRING,
+        number=110223613,
+        optional=True,
+    )
+    invert_match = proto.Field(
+        proto.BOOL,
+        number=501130268,
+        optional=True,
+    )
+    prefix_match = proto.Field(
+        proto.STRING,
+        number=257898968,
+        optional=True,
+    )
+    present_match = proto.Field(
+        proto.BOOL,
+        number=67435841,
+        optional=True,
+    )
+    range_match = proto.Field(
+        proto.MESSAGE,
+        number=97244227,
+        optional=True,
+        message='Int64RangeMatch',
+    )
+    regex_match = proto.Field(
+        proto.STRING,
+        number=107387853,
+        optional=True,
+    )
+    suffix_match = proto.Field(
+        proto.STRING,
+        number=426488663,
+        optional=True,
+    )
+
+
+class HttpHeaderOption(proto.Message):
+    r"""Specification determining how headers are added to requests
+    or responses.
+
+    Attributes:
+        header_name (str):
+            The name of the header.
+
+            This field is a member of `oneof`_ ``_header_name``.
+        header_value (str):
+            The value of the header to add.
+
+            This field is a member of `oneof`_ ``_header_value``.
+        replace (bool):
+            If false, headerValue is appended to any
+            values that already exist for the header. If
+            true, headerValue is set for the header,
+            discarding any values that were set for that
+            header. The default value is false.
+
+            This field is a member of `oneof`_ ``_replace``.
+    """
+
+    header_name = proto.Field(
+        proto.STRING,
+        number=110223613,
+        optional=True,
+    )
+    header_value = proto.Field(
+        proto.STRING,
+        number=203094335,
+        optional=True,
+    )
+    replace = proto.Field(
+        proto.BOOL,
+        number=20755124,
+        optional=True,
+    )
+
+
+class HttpQueryParameterMatch(proto.Message):
+    r"""HttpRouteRuleMatch criteria for a request's query parameter.
+
+    Attributes:
+        exact_match (str):
+            The queryParameterMatch matches if the value
+            of the parameter exactly matches the contents of
+            exactMatch. Only one of presentMatch, exactMatch
+            or regexMatch must be set.
+
+            This field is a member of `oneof`_ ``_exact_match``.
+        name (str):
+            The name of the query parameter to match. The
+            query parameter must exist in the request, in
+            the absence of which the request match fails.
+
+            This field is a member of `oneof`_ ``_name``.
+        present_match (bool):
+            Specifies that the queryParameterMatch
+            matches if the request contains the query
+            parameter, irrespective of whether the parameter
+            has a value or not. Only one of presentMatch,
+            exactMatch or regexMatch must be set.
+
+            This field is a member of `oneof`_ ``_present_match``.
+        regex_match (str):
+            The queryParameterMatch matches if the value of the
+            parameter matches the regular expression specified by
+            regexMatch. For the regular expression grammar, please see
+            github.com/google/re2/wiki/Syntax Only one of presentMatch,
+            exactMatch or regexMatch must be set. Note that regexMatch
+            only applies when the loadBalancingScheme is set to
+            INTERNAL_SELF_MANAGED.
+
+            This field is a member of `oneof`_ ``_regex_match``.
+    """
+
+    exact_match = proto.Field(
+        proto.STRING,
+        number=457641093,
+        optional=True,
+    )
+    name = proto.Field(
+        proto.STRING,
+        number=3373707,
+        optional=True,
+    )
+    present_match = proto.Field(
+        proto.BOOL,
+        number=67435841,
+        optional=True,
+    )
+    regex_match = proto.Field(
+        proto.STRING,
+        number=107387853,
+        optional=True,
+    )
+
+
+class HttpRedirectAction(proto.Message):
+    r"""Specifies settings for an HTTP redirect.
+
+    Attributes:
+        host_redirect (str):
+            The host that will be used in the redirect
+            response instead of the one that was supplied in
+            the request. The value must be between 1 and 255
+            characters.
+
+            This field is a member of `oneof`_ ``_host_redirect``.
+        https_redirect (bool):
+            If set to true, the URL scheme in the
+            redirected request is set to https. If set to
+            false, the URL scheme of the redirected request
+            will remain the same as that of the request.
+            This must only be set for UrlMaps used in
+            TargetHttpProxys. Setting this true for
+            TargetHttpsProxy is not permitted. The default
+            is set to false.
+
+            This field is a member of `oneof`_ ``_https_redirect``.
+        path_redirect (str):
+            The path that will be used in the redirect
+            response instead of the one that was supplied in
+            the request. pathRedirect cannot be supplied
+            together with prefixRedirect. Supply one alone
+            or neither. If neither is supplied, the path of
+            the original request will be used for the
+            redirect. The value must be between 1 and 1024
+            characters.
+
+            This field is a member of `oneof`_ ``_path_redirect``.
+        prefix_redirect (str):
+            The prefix that replaces the prefixMatch
+            specified in the HttpRouteRuleMatch, retaining
+            the remaining portion of the URL before
+            redirecting the request. prefixRedirect cannot
+            be supplied together with pathRedirect. Supply
+            one alone or neither. If neither is supplied,
+            the path of the original request will be used
+            for the redirect. The value must be between 1
+            and 1024 characters.
+
+            This field is a member of `oneof`_ ``_prefix_redirect``.
+        redirect_response_code (str):
+            The HTTP Status code to use for this RedirectAction.
+            Supported values are: - MOVED_PERMANENTLY_DEFAULT, which is
+            the default value and corresponds to 301. - FOUND, which
+            corresponds to 302. - SEE_OTHER which corresponds to 303. -
+            TEMPORARY_REDIRECT, which corresponds to 307. In this case,
+            the request method will be retained. - PERMANENT_REDIRECT,
+            which corresponds to 308. In this case, the request method
+            will be retained. Check the RedirectResponseCode enum for
+            the list of possible values.
+
+            This field is a member of `oneof`_ ``_redirect_response_code``.
+        strip_query (bool):
+            If set to true, any accompanying query
+            portion of the original URL is removed prior to
+            redirecting the request. If set to false, the
+            query portion of the original URL is retained.
+            The default is set to false.
+
+            This field is a member of `oneof`_ ``_strip_query``.
+    """
+    class RedirectResponseCode(proto.Enum):
+        r"""The HTTP Status code to use for this RedirectAction. Supported
+        values are: - MOVED_PERMANENTLY_DEFAULT, which is the default value
+        and corresponds to 301. - FOUND, which corresponds to 302. -
+        SEE_OTHER which corresponds to 303. - TEMPORARY_REDIRECT, which
+        corresponds to 307. In this case, the request method will be
+        retained. - PERMANENT_REDIRECT, which corresponds to 308. In this
+        case, the request method will be retained.
+        """
+        UNDEFINED_REDIRECT_RESPONSE_CODE = 0
+        FOUND = 67084130
+        MOVED_PERMANENTLY_DEFAULT = 386698449
+        PERMANENT_REDIRECT = 382006381
+        SEE_OTHER = 445380580
+        TEMPORARY_REDIRECT = 489550378
+
+    host_redirect = proto.Field(
+        proto.STRING,
+        number=107417747,
+        optional=True,
+    )
+    https_redirect = proto.Field(
+        proto.BOOL,
+        number=170260656,
+        optional=True,
+    )
+    path_redirect = proto.Field(
+        proto.STRING,
+        number=272342710,
+        optional=True,
+    )
+    prefix_redirect = proto.Field(
+        proto.STRING,
+        number=446184169,
+        optional=True,
+    )
+    redirect_response_code = proto.Field(
+        proto.STRING,
+        number=436710408,
+        optional=True,
+    )
+    strip_query = proto.Field(
+        proto.BOOL,
+        number=52284641,
+        optional=True,
+    )
+
+
+class HttpRetryPolicy(proto.Message):
+    r"""The retry policy associates with HttpRouteRule
+
+    Attributes:
+        num_retries (int):
+            Specifies the allowed number of retries. This
+            number must be > 0. If not specified, defaults
+            to 1.
+
+            This field is a member of `oneof`_ ``_num_retries``.
+        per_try_timeout (google.cloud.compute_v1.types.Duration):
+            Specifies a non-zero timeout per retry
+            attempt. If not specified, will use the timeout
+            set in HttpRouteAction. If timeout in
+            HttpRouteAction is not set, will use the largest
+            timeout among all backend services associated
+            with the route.
+
+            This field is a member of `oneof`_ ``_per_try_timeout``.
+        retry_conditions (Sequence[str]):
+            Specifies one or more conditions when this retry policy
+            applies. Valid values are: - 5xx: Retry will be attempted if
+            the instance or endpoint responds with any 5xx response
+            code, or if the instance or endpoint does not respond at
+            all, example: disconnects, reset, read timeout, connection
+            failure, and refused streams. - gateway-error: Similar to
+            5xx, but only applies to response codes 502, 503 or 504. -
+            connect-failure: A retry will be attempted on failures
+            connecting to the instance or endpoint, for example due to
+            connection timeouts. - retriable-4xx: A retry will be
+            attempted if the instance or endpoint responds with a
+            retriable 4xx response code. Currently the only retriable
+            error supported is 409. - refused-stream: A retry will be
+            attempted if the instance or endpoint resets the stream with
+            a REFUSED_STREAM error code. This reset type indicates that
+            it is safe to retry. - cancelled: A retry will be attempted
+            if the gRPC status code in the response header is set to
+            cancelled. - deadline-exceeded: A retry will be attempted if
+            the gRPC status code in the response header is set to
+            deadline-exceeded. - internal: A retry will be attempted if
+            the gRPC status code in the response header is set to
+            internal. - resource-exhausted: A retry will be attempted if
+            the gRPC status code in the response header is set to
+            resource-exhausted. - unavailable: A retry will be attempted
+            if the gRPC status code in the response header is set to
+            unavailable.
+    """
+
+    num_retries = proto.Field(
+        proto.UINT32,
+        number=251680141,
+        optional=True,
+    )
+    per_try_timeout = proto.Field(
+        proto.MESSAGE,
+        number=280041147,
+        optional=True,
+        message='Duration',
+    )
+    retry_conditions = proto.RepeatedField(
+        proto.STRING,
+        number=28815535,
+    )
+
+
+class HttpRouteAction(proto.Message):
+    r"""Advanced routing actions, such as URL rewrites and header
+    transformations, to apply when a route matches.
+
+    Attributes:
+        cors_policy (google.cloud.compute_v1.types.CorsPolicy):
+            The specification for allowing client side
+            cross-origin requests. Please see W3C
+            Recommendation for Cross Origin Resource Sharing
+            Not supported when the URL map is bound to
+            target gRPC proxy.
+
+            This field is a member of `oneof`_ ``_cors_policy``.
+        fault_injection_policy (google.cloud.compute_v1.types.HttpFaultInjection):
+            The specification for fault injection introduced into
+            traffic to test the resiliency of clients to backend service
+            failure. As part of fault injection, when clients send
+            requests to a backend service, delays can be introduced by
+            Loadbalancer on a percentage of requests before sending
+            those request to the backend service. Similarly requests
+            from clients can be aborted by the Loadbalancer for a
+            percentage of requests. For the requests impacted by fault
+            injection, timeout and retry_policy will be ignored by
+            clients that are configured with a fault_injection_policy.
+
+            This field is a member of `oneof`_ ``_fault_injection_policy``.
+        max_stream_duration (google.cloud.compute_v1.types.Duration):
+            Specifies the maximum duration (timeout) for streams on the
+            selected route. Unlike the timeout field where the timeout
+            duration starts from the time the request has been fully
+            processed (i.e. end-of-stream), the duration in this field
+            is computed from the beginning of the stream until the
+            response has been completely processed, including all
+            retries. A stream that does not complete in this duration is
+            closed. If not specified, will use the largest
+            maxStreamDuration among all backend services associated with
+            the route. This field is only allowed if the Url map is used
+            with backend services with loadBalancingScheme set to
+            INTERNAL_SELF_MANAGED.
+
+            This field is a member of `oneof`_ ``_max_stream_duration``.
+        request_mirror_policy (google.cloud.compute_v1.types.RequestMirrorPolicy):
+            Specifies the policy on how requests intended
+            for the route's backends are shadowed to a
+            separate mirrored backend service. Loadbalancer
+            does not wait for responses from the shadow
+            service. Prior to sending traffic to the shadow
+            service, the host / authority header is suffixed
+            with -shadow. Not supported when the URL map is
+            bound to target gRPC proxy that has
+            validateForProxyless field set to true.
+
+            This field is a member of `oneof`_ ``_request_mirror_policy``.
+        retry_policy (google.cloud.compute_v1.types.HttpRetryPolicy):
+            Specifies the retry policy associated with
+            this route. Not supported when the URL map is
+            bound to target gRPC proxy that has
+            validateForProxyless field set to true.
+
+            This field is a member of `oneof`_ ``_retry_policy``.
+        timeout (google.cloud.compute_v1.types.Duration):
+            Specifies the timeout for the selected route.
+            Timeout is computed from the time the request
+            has been fully processed (i.e. end-of-stream) up
+            until the response has been completely
+            processed. Timeout includes all retries. If not
+            specified, will use the largest timeout among
+            all backend services associated with the route.
+            Not supported when the URL map is bound to
+            target gRPC proxy that has validateForProxyless
+            field set to true.
+
+            This field is a member of `oneof`_ ``_timeout``.
+        url_rewrite (google.cloud.compute_v1.types.UrlRewrite):
+            The spec to modify the URL of the request,
+            prior to forwarding the request to the matched
+            service. urlRewrite is the only action supported
+            in UrlMaps for external HTTP(S) load balancers.
+            Not supported when the URL map is bound to
+            target gRPC proxy that has validateForProxyless
+            field set to true.
+
+            This field is a member of `oneof`_ ``_url_rewrite``.
+        weighted_backend_services (Sequence[google.cloud.compute_v1.types.WeightedBackendService]):
+            A list of weighted backend services to send
+            traffic to when a route match occurs. The
+            weights determine the fraction of traffic that
+            flows to their corresponding backend service. If
+            all traffic needs to go to a single backend
+            service, there must be one
+            weightedBackendService with weight set to a non-
+            zero number. Once a backendService is identified
+            and before forwarding the request to the backend
+            service, advanced routing actions such as URL
+            rewrites and header transformations are applied
+            depending on additional settings specified in
+            this HttpRouteAction.
+    """
+
+    cors_policy = proto.Field(
+        proto.MESSAGE,
+        number=398943748,
+        optional=True,
+        message='CorsPolicy',
+    )
+    fault_injection_policy = proto.Field(
+        proto.MESSAGE,
+        number=412781079,
+        optional=True,
+        message='HttpFaultInjection',
+    )
+    max_stream_duration = proto.Field(
+        proto.MESSAGE,
+        number=61428376,
+        optional=True,
+        message='Duration',
+    )
+    request_mirror_policy = proto.Field(
+        proto.MESSAGE,
+        number=220196866,
+        optional=True,
+        message='RequestMirrorPolicy',
+    )
+    retry_policy = proto.Field(
+        proto.MESSAGE,
+        number=56799913,
+        optional=True,
+        message='HttpRetryPolicy',
+    )
+    timeout = proto.Field(
+        proto.MESSAGE,
+        number=296701281,
+        optional=True,
+        message='Duration',
+    )
+    url_rewrite = proto.Field(
+        proto.MESSAGE,
+        number=273333948,
+        optional=True,
+        message='UrlRewrite',
+    )
+    weighted_backend_services = proto.RepeatedField(
+        proto.MESSAGE,
+        number=337028049,
+        message='WeightedBackendService',
+    )
+
+
+class HttpRouteRule(proto.Message):
+    r"""An HttpRouteRule specifies how to match an HTTP request and
+    the corresponding routing action that load balancing proxies
+    will perform.
+
+    Attributes:
+        description (str):
+            The short description conveying the intent of
+            this routeRule. The description can have a
+            maximum length of 1024 characters.
+
+            This field is a member of `oneof`_ ``_description``.
+        header_action (google.cloud.compute_v1.types.HttpHeaderAction):
+            Specifies changes to request and response headers that need
+            to take effect for the selected backendService. The
+            headerAction specified here are applied before the matching
+            pathMatchers[].headerAction and after
+            pathMatchers[].routeRules[].routeAction.weightedBackendService.backendServiceWeightAction[].headerAction
+            Note that headerAction is not supported for Loadbalancers
+            that have their loadBalancingScheme set to EXTERNAL. Not
+            supported when the URL map is bound to target gRPC proxy
+            that has validateForProxyless field set to true.
+
+            This field is a member of `oneof`_ ``_header_action``.
+        match_rules (Sequence[google.cloud.compute_v1.types.HttpRouteRuleMatch]):
+            The list of criteria for matching attributes
+            of a request to this routeRule. This list has OR
+            semantics: the request matches this routeRule
+            when any of the matchRules are satisfied.
+            However predicates within a given matchRule have
+            AND semantics. All predicates within a matchRule
+            must match for the request to match the rule.
+        priority (int):
+            For routeRules within a given pathMatcher,
+            priority determines the order in which load
+            balancer will interpret routeRules. RouteRules
+            are evaluated in order of priority, from the
+            lowest to highest number. The priority of a rule
+            decreases as its number increases (1, 2, 3,
+            N+1). The first rule that matches the request is
+            applied. You cannot configure two or more
+            routeRules with the same priority. Priority for
+            each rule must be set to a number between 0 and
+            2147483647 inclusive. Priority numbers can have
+            gaps, which enable you to add or remove rules in
+            the future without affecting the rest of the
+            rules. For example, 1, 2, 3, 4, 5, 9, 12, 16 is
+            a valid series of priority numbers to which you
+            could add rules numbered from 6 to 8, 10 to 11,
+            and 13 to 15 in the future without any impact on
+            existing rules.
+
+            This field is a member of `oneof`_ ``_priority``.
+        route_action (google.cloud.compute_v1.types.HttpRouteAction):
+            In response to a matching matchRule, the load
+            balancer performs advanced routing actions like
+            URL rewrites, header transformations, etc. prior
+            to forwarding the request to the selected
+            backend. If routeAction specifies any
+            weightedBackendServices, service must not be
+            set. Conversely if service is set, routeAction
+            cannot contain any weightedBackendServices. Only
+            one of urlRedirect, service or
+            routeAction.weightedBackendService must be set.
+            UrlMaps for external HTTP(S) load balancers
+            support only the urlRewrite action within a
+            routeRule's routeAction.
+
+            This field is a member of `oneof`_ ``_route_action``.
+        service (str):
+            The full or partial URL of the backend
+            service resource to which traffic is directed if
+            this rule is matched. If routeAction is
+            additionally specified, advanced routing actions
+            like URL Rewrites, etc. take effect prior to
+            sending the request to the backend. However, if
+            service is specified, routeAction cannot contain
+            any weightedBackendService s. Conversely, if
+            routeAction specifies any
+            weightedBackendServices, service must not be
+            specified. Only one of urlRedirect, service or
+            routeAction.weightedBackendService must be set.
+
+            This field is a member of `oneof`_ ``_service``.
+        url_redirect (google.cloud.compute_v1.types.HttpRedirectAction):
+            When this rule is matched, the request is
+            redirected to a URL specified by urlRedirect. If
+            urlRedirect is specified, service or routeAction
+            must not be set. Not supported when the URL map
+            is bound to target gRPC proxy.
+
+            This field is a member of `oneof`_ ``_url_redirect``.
+    """
+
+    description = proto.Field(
+        proto.STRING,
+        number=422937596,
+        optional=True,
+    )
+    header_action = proto.Field(
+        proto.MESSAGE,
+        number=328077352,
+        optional=True,
+        message='HttpHeaderAction',
+    )
+    match_rules = proto.RepeatedField(
+        proto.MESSAGE,
+        number=376200701,
+        message='HttpRouteRuleMatch',
+    )
+    priority = proto.Field(
+        proto.INT32,
+        number=445151652,
+        optional=True,
+    )
+    route_action = proto.Field(
+        proto.MESSAGE,
+        number=424563948,
+        optional=True,
+        message='HttpRouteAction',
+    )
+    service = proto.Field(
+        proto.STRING,
+        number=373540533,
+        optional=True,
+    )
+    url_redirect = proto.Field(
+        proto.MESSAGE,
+        number=405147820,
+        optional=True,
+        message='HttpRedirectAction',
+    )
+
+
+class HttpRouteRuleMatch(proto.Message):
+    r"""HttpRouteRuleMatch specifies a set of criteria for matching
+    requests to an HttpRouteRule. All specified criteria must be
+    satisfied for a match to occur.
+
+    Attributes:
+        full_path_match (str):
+            For satisfying the matchRule condition, the
+            path of the request must exactly match the value
+            specified in fullPathMatch after removing any
+            query parameters and anchor that may be part of
+            the original URL. fullPathMatch must be between
+            1 and 1024 characters. Only one of prefixMatch,
+            fullPathMatch or regexMatch must be specified.
+
+            This field is a member of `oneof`_ ``_full_path_match``.
+        header_matches (Sequence[google.cloud.compute_v1.types.HttpHeaderMatch]):
+            Specifies a list of header match criteria,
+            all of which must match corresponding headers in
+            the request.
+        ignore_case (bool):
+            Specifies that prefixMatch and fullPathMatch
+            matches are case sensitive. The default value is
+            false. ignoreCase must not be used with
+            regexMatch. Not supported when the URL map is
+            bound to target gRPC proxy.
+
+            This field is a member of `oneof`_ ``_ignore_case``.
+        metadata_filters (Sequence[google.cloud.compute_v1.types.MetadataFilter]):
+            Opaque filter criteria used by Loadbalancer to restrict
+            routing configuration to a limited set of xDS compliant
+            clients. In their xDS requests to Loadbalancer, xDS clients
+            present node metadata. When there is a match, the relevant
+            routing configuration is made available to those proxies.
+            For each metadataFilter in this list, if its
+            filterMatchCriteria is set to MATCH_ANY, at least one of the
+            filterLabels must match the corresponding label provided in
+            the metadata. If its filterMatchCriteria is set to
+            MATCH_ALL, then all of its filterLabels must match with
+            corresponding labels provided in the metadata. If multiple
+            metadataFilters are specified, all of them need to be
+            satisfied in order to be considered a match. metadataFilters
+            specified here will be applied after those specified in
+            ForwardingRule that refers to the UrlMap this
+            HttpRouteRuleMatch belongs to. metadataFilters only applies
+            to Loadbalancers that have their loadBalancingScheme set to
+            INTERNAL_SELF_MANAGED. Not supported when the URL map is
+            bound to target gRPC proxy that has validateForProxyless
+            field set to true.
+        prefix_match (str):
+            For satisfying the matchRule condition, the
+            request's path must begin with the specified
+            prefixMatch. prefixMatch must begin with a /.
+            The value must be between 1 and 1024 characters.
+            Only one of prefixMatch, fullPathMatch or
+            regexMatch must be specified.
+
+            This field is a member of `oneof`_ ``_prefix_match``.
+        query_parameter_matches (Sequence[google.cloud.compute_v1.types.HttpQueryParameterMatch]):
+            Specifies a list of query parameter match
+            criteria, all of which must match corresponding
+            query parameters in the request. Not supported
+            when the URL map is bound to target gRPC proxy.
+        regex_match (str):
+            For satisfying the matchRule condition, the path of the
+            request must satisfy the regular expression specified in
+            regexMatch after removing any query parameters and anchor
+            supplied with the original URL. For regular expression
+            grammar please see github.com/google/re2/wiki/Syntax Only
+            one of prefixMatch, fullPathMatch or regexMatch must be
+            specified. Note that regexMatch only applies to
+            Loadbalancers that have their loadBalancingScheme set to
+            INTERNAL_SELF_MANAGED.
+
+            This field is a member of `oneof`_ ``_regex_match``.
+    """
+
+    full_path_match = proto.Field(
+        proto.STRING,
+        number=214598875,
+        optional=True,
+    )
+    header_matches = proto.RepeatedField(
+        proto.MESSAGE,
+        number=361903489,
+        message='HttpHeaderMatch',
+    )
+    ignore_case = proto.Field(
+        proto.BOOL,
+        number=464324989,
+        optional=True,
+    )
+    metadata_filters = proto.RepeatedField(
+        proto.MESSAGE,
+        number=464725739,
+        message='MetadataFilter',
+    )
+    prefix_match = proto.Field(
+        proto.STRING,
+        number=257898968,
+        optional=True,
+    )
+    query_parameter_matches = proto.RepeatedField(
+        proto.MESSAGE,
+        number=286231270,
+        message='HttpQueryParameterMatch',
+    )
+    regex_match = proto.Field(
+        proto.STRING,
+        number=107387853,
+        optional=True,
+    )
+
+
+class Image(proto.Message):
+    r"""Represents an Image resource. You can use images to create
+    boot disks for your VM instances. For more information, read
+    Images.
+
+    Attributes:
+        archive_size_bytes (int):
+            Size of the image tar.gz archive stored in
+            Google Cloud Storage (in bytes).
+
+            This field is a member of `oneof`_ ``_archive_size_bytes``.
+        creation_timestamp (str):
+            [Output Only] Creation timestamp in RFC3339 text format.
+
+            This field is a member of `oneof`_ ``_creation_timestamp``.
+        deprecated (google.cloud.compute_v1.types.DeprecationStatus):
+            The deprecation status associated with this
+            image.
+
+            This field is a member of `oneof`_ ``_deprecated``.
+ description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + disk_size_gb (int): + Size of the image when restored onto a + persistent disk (in GB). + + This field is a member of `oneof`_ ``_disk_size_gb``. + family (str): + The name of the image family to which this + image belongs. You can create disks by + specifying an image family instead of a specific + image name. The image family always returns its + latest image that is not deprecated. The name of + the image family must comply with RFC1035. + + This field is a member of `oneof`_ ``_family``. + guest_os_features (Sequence[google.cloud.compute_v1.types.GuestOsFeature]): + A list of features to enable on the guest + operating system. Applicable only for bootable + images. Read Enabling guest operating system + features to see a list of available options. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + image_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + Encrypts the image using a customer-supplied + encryption key. After you encrypt an image with + a customer-supplied key, you must provide the + same key if you use the image later (e.g. to + create a disk from the image). Customer-supplied + encryption keys do not protect access to + metadata of the disk. If you do not provide an + encryption key when creating the image, then the + disk will be encrypted using an automatically + generated key and you do not need to provide a + key to use the image later. + + This field is a member of `oneof`_ ``_image_encryption_key``. + kind (str): + [Output Only] Type of the resource. Always compute#image for + images. + + This field is a member of `oneof`_ ``_kind``. 
+ label_fingerprint (str): + A fingerprint for the labels being applied to + this image, which is essentially a hash of the + labels used for optimistic locking. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash in order to update or + change labels, otherwise the request will fail + with error 412 conditionNotMet. To see the + latest fingerprint, make a get() request to + retrieve an image. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.Image.LabelsEntry]): + Labels to apply to this image. These can be + later modified by the setLabels method. + license_codes (Sequence[int]): + Integer license codes indicating which + licenses are attached to this image. + licenses (Sequence[str]): + Any applicable license URI. + name (str): + Name of the resource; provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + raw_disk (google.cloud.compute_v1.types.RawDisk): + The parameters of the raw disk image. + + This field is a member of `oneof`_ ``_raw_disk``. + satisfies_pzs (bool): + [Output Only] Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzs``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + shielded_instance_initial_state (google.cloud.compute_v1.types.InitialStateConfig): + Set the secure boot keys of shielded + instance. 
+ + This field is a member of `oneof`_ ``_shielded_instance_initial_state``. + source_disk (str): + URL of the source disk used to create this + image. For example, the following are valid + values: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /disks/disk - + projects/project/zones/zone/disks/disk - + zones/zone/disks/disk In order to create an + image, you must provide the full or partial URL + of one of the following: - The rawDisk.source + URL - The sourceDisk URL - The sourceImage URL - + The sourceSnapshot URL + + This field is a member of `oneof`_ ``_source_disk``. + source_disk_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source disk. Required if the source disk is + protected by a customer-supplied encryption key. + + This field is a member of `oneof`_ ``_source_disk_encryption_key``. + source_disk_id (str): + [Output Only] The ID value of the disk used to create this + image. This value may be used to determine whether the image + was taken from the current or a previous instance of a given + disk name. + + This field is a member of `oneof`_ ``_source_disk_id``. + source_image (str): + URL of the source image used to create this image. The + following are valid formats for the URL: - + https://www.googleapis.com/compute/v1/projects/project_id/global/ + images/image_name - + projects/project_id/global/images/image_name In order to + create an image, you must provide the full or partial URL of + one of the following: - The rawDisk.source URL - The + sourceDisk URL - The sourceImage URL - The sourceSnapshot + URL + + This field is a member of `oneof`_ ``_source_image``. + source_image_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source image. Required if the source image is + protected by a customer-supplied encryption key. 
+ + This field is a member of `oneof`_ ``_source_image_encryption_key``. + source_image_id (str): + [Output Only] The ID value of the image used to create this + image. This value may be used to determine whether the image + was taken from the current or a previous instance of a given + image name. + + This field is a member of `oneof`_ ``_source_image_id``. + source_snapshot (str): + URL of the source snapshot used to create this image. The + following are valid formats for the URL: - + https://www.googleapis.com/compute/v1/projects/project_id/global/ + snapshots/snapshot_name - + projects/project_id/global/snapshots/snapshot_name In order + to create an image, you must provide the full or partial URL + of one of the following: - The rawDisk.source URL - The + sourceDisk URL - The sourceImage URL - The sourceSnapshot + URL + + This field is a member of `oneof`_ ``_source_snapshot``. + source_snapshot_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source snapshot. Required if the source snapshot + is protected by a customer-supplied encryption + key. + + This field is a member of `oneof`_ ``_source_snapshot_encryption_key``. + source_snapshot_id (str): + [Output Only] The ID value of the snapshot used to create + this image. This value may be used to determine whether the + snapshot was taken from the current or a previous instance + of a given snapshot name. + + This field is a member of `oneof`_ ``_source_snapshot_id``. + source_type (str): + The type of the image used to create this + disk. The default and only value is RAW Check + the SourceType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_source_type``. + status (str): + [Output Only] The status of the image. An image can be used + to create other resources, such as instances, only after the + image has been successfully created and the status is set to + READY. 
Possible values are FAILED, PENDING, or READY. Check + the Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + storage_locations (Sequence[str]): + Cloud Storage bucket storage location of the + image (regional or multi-regional). + """ + class SourceType(proto.Enum): + r"""The type of the image used to create this disk. The default + and only value is RAW + """ + UNDEFINED_SOURCE_TYPE = 0 + RAW = 80904 + + class Status(proto.Enum): + r"""[Output Only] The status of the image. An image can be used to + create other resources, such as instances, only after the image has + been successfully created and the status is set to READY. Possible + values are FAILED, PENDING, or READY. + """ + UNDEFINED_STATUS = 0 + DELETING = 528602024 + FAILED = 455706685 + PENDING = 35394935 + READY = 77848963 + + archive_size_bytes = proto.Field( + proto.INT64, + number=381093450, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + deprecated = proto.Field( + proto.MESSAGE, + number=515138995, + optional=True, + message='DeprecationStatus', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disk_size_gb = proto.Field( + proto.INT64, + number=316263735, + optional=True, + ) + family = proto.Field( + proto.STRING, + number=328751972, + optional=True, + ) + guest_os_features = proto.RepeatedField( + proto.MESSAGE, + number=79294545, + message='GuestOsFeature', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + image_encryption_key = proto.Field( + proto.MESSAGE, + number=379512583, + optional=True, + message='CustomerEncryptionKey', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + license_codes = 
proto.RepeatedField( + proto.INT64, + number=45482664, + ) + licenses = proto.RepeatedField( + proto.STRING, + number=337642578, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + raw_disk = proto.Field( + proto.MESSAGE, + number=503113556, + optional=True, + message='RawDisk', + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + shielded_instance_initial_state = proto.Field( + proto.MESSAGE, + number=192356867, + optional=True, + message='InitialStateConfig', + ) + source_disk = proto.Field( + proto.STRING, + number=451753793, + optional=True, + ) + source_disk_encryption_key = proto.Field( + proto.MESSAGE, + number=531501153, + optional=True, + message='CustomerEncryptionKey', + ) + source_disk_id = proto.Field( + proto.STRING, + number=454190809, + optional=True, + ) + source_image = proto.Field( + proto.STRING, + number=50443319, + optional=True, + ) + source_image_encryption_key = proto.Field( + proto.MESSAGE, + number=381503659, + optional=True, + message='CustomerEncryptionKey', + ) + source_image_id = proto.Field( + proto.STRING, + number=55328291, + optional=True, + ) + source_snapshot = proto.Field( + proto.STRING, + number=126061928, + optional=True, + ) + source_snapshot_encryption_key = proto.Field( + proto.MESSAGE, + number=303679322, + optional=True, + message='CustomerEncryptionKey', + ) + source_snapshot_id = proto.Field( + proto.STRING, + number=98962258, + optional=True, + ) + source_type = proto.Field( + proto.STRING, + number=452245726, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + storage_locations = proto.RepeatedField( + proto.STRING, + number=328005274, + ) + + +class ImageFamilyView(proto.Message): + r""" + + Attributes: + image (google.cloud.compute_v1.types.Image): + The latest image that is part of the + specified image family 
in the requested + location, and that is not deprecated. + + This field is a member of `oneof`_ ``_image``. + """ + + image = proto.Field( + proto.MESSAGE, + number=100313435, + optional=True, + message='Image', + ) + + +class ImageList(proto.Message): + r"""Contains a list of images. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Image]): + A list of Image resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Image', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InitialStateConfig(proto.Message): + r"""Initial State for shielded instance, these are public keys + which are safe to store in public + + Attributes: + dbs (Sequence[google.cloud.compute_v1.types.FileContentBuffer]): + The Key Database (db). + dbxs (Sequence[google.cloud.compute_v1.types.FileContentBuffer]): + The forbidden key database (dbx). + keks (Sequence[google.cloud.compute_v1.types.FileContentBuffer]): + The Key Exchange Key (KEK). + pk (google.cloud.compute_v1.types.FileContentBuffer): + The Platform Key (PK). + + This field is a member of `oneof`_ ``_pk``. + """ + + dbs = proto.RepeatedField( + proto.MESSAGE, + number=99253, + message='FileContentBuffer', + ) + dbxs = proto.RepeatedField( + proto.MESSAGE, + number=3077113, + message='FileContentBuffer', + ) + keks = proto.RepeatedField( + proto.MESSAGE, + number=3288130, + message='FileContentBuffer', + ) + pk = proto.Field( + proto.MESSAGE, + number=3579, + optional=True, + message='FileContentBuffer', + ) + + +class InsertAddressRequest(proto.Message): + r"""A request message for Addresses.Insert. See the method + description for details. + + Attributes: + address_resource (google.cloud.compute_v1.types.Address): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + address_resource = proto.Field( + proto.MESSAGE, + number=483888121, + message='Address', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertAutoscalerRequest(proto.Message): + r"""A request message for Autoscalers.Insert. See the method + description for details. + + Attributes: + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + Name of the zone for this request. + """ + + autoscaler_resource = proto.Field( + proto.MESSAGE, + number=207616118, + message='Autoscaler', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertBackendBucketRequest(proto.Message): + r"""A request message for BackendBuckets.Insert. See the method + description for details. + + Attributes: + backend_bucket_resource (google.cloud.compute_v1.types.BackendBucket): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + backend_bucket_resource = proto.Field( + proto.MESSAGE, + number=380757784, + message='BackendBucket', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.Insert. See the method + description for details. + + Attributes: + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_service_resource = proto.Field( + proto.MESSAGE, + number=347586723, + message='BackendService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertDiskRequest(proto.Message): + r"""A request message for Disks.Insert. See the method + description for details. + + Attributes: + disk_resource (google.cloud.compute_v1.types.Disk): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + source_image (str): + Source image to restore onto a disk. This + field is optional. + + This field is a member of `oneof`_ ``_source_image``. + zone (str): + The name of the zone for this request. + """ + + disk_resource = proto.Field( + proto.MESSAGE, + number=25880688, + message='Disk', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + source_image = proto.Field( + proto.STRING, + number=50443319, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertExternalVpnGatewayRequest(proto.Message): + r"""A request message for ExternalVpnGateways.Insert. See the + method description for details. + + Attributes: + external_vpn_gateway_resource (google.cloud.compute_v1.types.ExternalVpnGateway): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + external_vpn_gateway_resource = proto.Field( + proto.MESSAGE, + number=486813576, + message='ExternalVpnGateway', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.Insert. See the method + description for details. + + Attributes: + firewall_policy_resource (google.cloud.compute_v1.types.FirewallPolicy): + The body resource for this request + parent_id (str): + Parent ID for this request. The ID can be either be + "folders/[FOLDER_ID]" if the parent is a folder or + "organizations/[ORGANIZATION_ID]" if the parent is an + organization. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + firewall_policy_resource = proto.Field( + proto.MESSAGE, + number=495049532, + message='FirewallPolicy', + ) + parent_id = proto.Field( + proto.STRING, + number=459714768, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertFirewallRequest(proto.Message): + r"""A request message for Firewalls.Insert. See the method + description for details. + + Attributes: + firewall_resource (google.cloud.compute_v1.types.Firewall): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_resource = proto.Field( + proto.MESSAGE, + number=41425005, + message='Firewall', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertForwardingRuleRequest(proto.Message): + r"""A request message for ForwardingRules.Insert. See the method + description for details. + + Attributes: + forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. 
+ request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + forwarding_rule_resource = proto.Field( + proto.MESSAGE, + number=301211695, + message='ForwardingRule', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertGlobalAddressRequest(proto.Message): + r"""A request message for GlobalAddresses.Insert. See the method + description for details. + + Attributes: + address_resource (google.cloud.compute_v1.types.Address): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + address_resource = proto.Field( + proto.MESSAGE, + number=483888121, + message='Address', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertGlobalForwardingRuleRequest(proto.Message): + r"""A request message for GlobalForwardingRules.Insert. See the + method description for details. + + Attributes: + forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + forwarding_rule_resource = proto.Field( + proto.MESSAGE, + number=301211695, + message='ForwardingRule', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertGlobalNetworkEndpointGroupRequest(proto.Message): + r"""A request message for GlobalNetworkEndpointGroups.Insert. See + the method description for details. 
+ + Attributes: + network_endpoint_group_resource (google.cloud.compute_v1.types.NetworkEndpointGroup): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + network_endpoint_group_resource = proto.Field( + proto.MESSAGE, + number=525788839, + message='NetworkEndpointGroup', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertGlobalPublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for GlobalPublicDelegatedPrefixes.Insert. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. 
If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix_resource = proto.Field( + proto.MESSAGE, + number=47594501, + message='PublicDelegatedPrefix', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertHealthCheckRequest(proto.Message): + r"""A request message for HealthChecks.Insert. See the method + description for details. + + Attributes: + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + health_check_resource = proto.Field( + proto.MESSAGE, + number=201925032, + message='HealthCheck', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertImageRequest(proto.Message): + r"""A request message for Images.Insert. See the method + description for details. + + Attributes: + force_create (bool): + Force image creation if true. + + This field is a member of `oneof`_ ``_force_create``. + image_resource (google.cloud.compute_v1.types.Image): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + force_create = proto.Field( + proto.BOOL, + number=197723344, + optional=True, + ) + image_resource = proto.Field( + proto.MESSAGE, + number=371171954, + message='Image', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.Insert. See the + method description for details. 
+ + Attributes: + instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where you want to create + the managed instance group. + """ + + instance_group_manager_resource = proto.Field( + proto.MESSAGE, + number=261063946, + message='InstanceGroupManager', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertInstanceGroupRequest(proto.Message): + r"""A request message for InstanceGroups.Insert. See the method + description for details. + + Attributes: + instance_group_resource (google.cloud.compute_v1.types.InstanceGroup): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where you want to create + the instance group. + """ + + instance_group_resource = proto.Field( + proto.MESSAGE, + number=286612152, + message='InstanceGroup', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertInstanceRequest(proto.Message): + r"""A request message for Instances.Insert. See the method + description for details. + + Attributes: + instance_resource (google.cloud.compute_v1.types.Instance): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + source_instance_template (str): + Specifies instance template to create the + instance. This field is optional. It can be a + full or partial URL. For example, the following + are all valid URLs to an instance template: - + https://www.googleapis.com/compute/v1/projects/project + /global/instanceTemplates/instanceTemplate - + projects/project/global/instanceTemplates/instanceTemplate + - global/instanceTemplates/instanceTemplate + + This field is a member of `oneof`_ ``_source_instance_template``. + zone (str): + The name of the zone for this request. + """ + + instance_resource = proto.Field( + proto.MESSAGE, + number=215988344, + message='Instance', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + source_instance_template = proto.Field( + proto.STRING, + number=332423616, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertInstanceTemplateRequest(proto.Message): + r"""A request message for InstanceTemplates.Insert. See the + method description for details. + + Attributes: + instance_template_resource (google.cloud.compute_v1.types.InstanceTemplate): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + instance_template_resource = proto.Field( + proto.MESSAGE, + number=10679561, + message='InstanceTemplate', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertInterconnectAttachmentRequest(proto.Message): + r"""A request message for InterconnectAttachments.Insert. See the + method description for details. + + Attributes: + interconnect_attachment_resource (google.cloud.compute_v1.types.InterconnectAttachment): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + validate_only (bool): + If true, the request will not be committed. + + This field is a member of `oneof`_ ``_validate_only``. 
+ """ + + interconnect_attachment_resource = proto.Field( + proto.MESSAGE, + number=212341369, + message='InterconnectAttachment', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + validate_only = proto.Field( + proto.BOOL, + number=242744629, + optional=True, + ) + + +class InsertInterconnectRequest(proto.Message): + r"""A request message for Interconnects.Insert. See the method + description for details. + + Attributes: + interconnect_resource (google.cloud.compute_v1.types.Interconnect): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + interconnect_resource = proto.Field( + proto.MESSAGE, + number=397611167, + message='Interconnect', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertLicenseRequest(proto.Message): + r"""A request message for Licenses.Insert. See the method + description for details. 
+ + Attributes: + license_resource (google.cloud.compute_v1.types.License): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + license_resource = proto.Field( + proto.MESSAGE, + number=437955148, + message='License', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertNetworkEndpointGroupRequest(proto.Message): + r"""A request message for NetworkEndpointGroups.Insert. See the + method description for details. + + Attributes: + network_endpoint_group_resource (google.cloud.compute_v1.types.NetworkEndpointGroup): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where you want to create + the network endpoint group. It should comply + with RFC1035. + """ + + network_endpoint_group_resource = proto.Field( + proto.MESSAGE, + number=525788839, + message='NetworkEndpointGroup', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertNetworkRequest(proto.Message): + r"""A request message for Networks.Insert. See the method + description for details. + + Attributes: + network_resource (google.cloud.compute_v1.types.Network): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + network_resource = proto.Field( + proto.MESSAGE, + number=122105599, + message='Network', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.Insert. See the method + description for details. + + Attributes: + initial_node_count (int): + Initial count of nodes in the node group. + node_group_resource (google.cloud.compute_v1.types.NodeGroup): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + initial_node_count = proto.Field( + proto.INT32, + number=71951469, + ) + node_group_resource = proto.Field( + proto.MESSAGE, + number=505321899, + message='NodeGroup', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertNodeTemplateRequest(proto.Message): + r"""A request message for NodeTemplates.Insert. See the method + description for details. 
+ + Attributes: + node_template_resource (google.cloud.compute_v1.types.NodeTemplate): + The body resource for this request + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + node_template_resource = proto.Field( + proto.MESSAGE, + number=127364406, + message='NodeTemplate', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertPacketMirroringRequest(proto.Message): + r"""A request message for PacketMirrorings.Insert. See the method + description for details. + + Attributes: + packet_mirroring_resource (google.cloud.compute_v1.types.PacketMirroring): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + packet_mirroring_resource = proto.Field( + proto.MESSAGE, + number=493501985, + message='PacketMirroring', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertPublicAdvertisedPrefixeRequest(proto.Message): + r"""A request message for PublicAdvertisedPrefixes.Insert. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + public_advertised_prefix_resource (google.cloud.compute_v1.types.PublicAdvertisedPrefix): + The body resource for this request + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_advertised_prefix_resource = proto.Field( + proto.MESSAGE, + number=233614223, + message='PublicAdvertisedPrefix', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertPublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for PublicDelegatedPrefixes.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + region (str): + Name of the region of this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix_resource = proto.Field( + proto.MESSAGE, + number=47594501, + message='PublicDelegatedPrefix', + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionAutoscalerRequest(proto.Message): + r"""A request message for RegionAutoscalers.Insert. See the + method description for details. + + Attributes: + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + autoscaler_resource = proto.Field( + proto.MESSAGE, + number=207616118, + message='Autoscaler', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionBackendServiceRequest(proto.Message): + r"""A request message for RegionBackendServices.Insert. See the + method description for details. 
+ + Attributes: + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_service_resource = proto.Field( + proto.MESSAGE, + number=347586723, + message='BackendService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionCommitmentRequest(proto.Message): + r"""A request message for RegionCommitments.Insert. See the + method description for details. + + Attributes: + commitment_resource (google.cloud.compute_v1.types.Commitment): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + commitment_resource = proto.Field( + proto.MESSAGE, + number=244240888, + message='Commitment', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.Insert. See the method + description for details. + + Attributes: + disk_resource (google.cloud.compute_v1.types.Disk): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ source_image (str): + Source image to restore onto a disk. This + field is optional. + + This field is a member of `oneof`_ ``_source_image``. + """ + + disk_resource = proto.Field( + proto.MESSAGE, + number=25880688, + message='Disk', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + source_image = proto.Field( + proto.STRING, + number=50443319, + optional=True, + ) + + +class InsertRegionHealthCheckRequest(proto.Message): + r"""A request message for RegionHealthChecks.Insert. See the + method description for details. + + Attributes: + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + health_check_resource = proto.Field( + proto.MESSAGE, + number=201925032, + message='HealthCheck', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionHealthCheckServiceRequest(proto.Message): + r"""A request message for RegionHealthCheckServices.Insert. See + the method description for details. + + Attributes: + health_check_service_resource (google.cloud.compute_v1.types.HealthCheckService): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + health_check_service_resource = proto.Field( + proto.MESSAGE, + number=477367794, + message='HealthCheckService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for RegionInstanceGroupManagers.Insert. 
See + the method description for details. + + Attributes: + instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + instance_group_manager_resource = proto.Field( + proto.MESSAGE, + number=261063946, + message='InstanceGroupManager', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionNetworkEndpointGroupRequest(proto.Message): + r"""A request message for RegionNetworkEndpointGroups.Insert. See + the method description for details. + + Attributes: + network_endpoint_group_resource (google.cloud.compute_v1.types.NetworkEndpointGroup): + The body resource for this request + project (str): + Project ID for this request. + region (str): + The name of the region where you want to + create the network endpoint group. It should + comply with RFC1035. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + network_endpoint_group_resource = proto.Field( + proto.MESSAGE, + number=525788839, + message='NetworkEndpointGroup', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionNotificationEndpointRequest(proto.Message): + r"""A request message for RegionNotificationEndpoints.Insert. See + the method description for details. + + Attributes: + notification_endpoint_resource (google.cloud.compute_v1.types.NotificationEndpoint): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. 
This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + """ + + notification_endpoint_resource = proto.Field( + proto.MESSAGE, + number=338459940, + message='NotificationEndpoint', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class InsertRegionSslCertificateRequest(proto.Message): + r"""A request message for RegionSslCertificates.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ ssl_certificate_resource (google.cloud.compute_v1.types.SslCertificate): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + ssl_certificate_resource = proto.Field( + proto.MESSAGE, + number=180709897, + message='SslCertificate', + ) + + +class InsertRegionTargetHttpProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpProxies.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ target_http_proxy_resource (google.cloud.compute_v1.types.TargetHttpProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_http_proxy_resource = proto.Field( + proto.MESSAGE, + number=24696744, + message='TargetHttpProxy', + ) + + +class InsertRegionTargetHttpsProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpsProxies.Insert. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ target_https_proxy_resource (google.cloud.compute_v1.types.TargetHttpsProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy_resource = proto.Field( + proto.MESSAGE, + number=433657473, + message='TargetHttpsProxy', + ) + + +class InsertRegionUrlMapRequest(proto.Message): + r"""A request message for RegionUrlMaps.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + begin_interface: MixerMutationRequestBuilder Request ID to + support idempotency. + + This field is a member of `oneof`_ ``_request_id``. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + url_map_resource = proto.Field( + proto.MESSAGE, + number=168675425, + message='UrlMap', + ) + + +class InsertReservationRequest(proto.Message): + r"""A request message for Reservations.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + reservation_resource (google.cloud.compute_v1.types.Reservation): + The body resource for this request + zone (str): + Name of the zone for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + reservation_resource = proto.Field( + proto.MESSAGE, + number=285030177, + message='Reservation', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ resource_policy_resource (google.cloud.compute_v1.types.ResourcePolicy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource_policy_resource = proto.Field( + proto.MESSAGE, + number=76826186, + message='ResourcePolicy', + ) + + +class InsertRouteRequest(proto.Message): + r"""A request message for Routes.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + route_resource (google.cloud.compute_v1.types.Route): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + route_resource = proto.Field( + proto.MESSAGE, + number=225428804, + message='Route', + ) + + +class InsertRouterRequest(proto.Message): + r"""A request message for Routers.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. 
+ region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + router_resource = proto.Field( + proto.MESSAGE, + number=155222084, + message='Router', + ) + + +class InsertSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + security_policy_resource (google.cloud.compute_v1.types.SecurityPolicy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + security_policy_resource = proto.Field( + proto.MESSAGE, + number=216159612, + message='SecurityPolicy', + ) + + +class InsertServiceAttachmentRequest(proto.Message): + r"""A request message for ServiceAttachments.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region of this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ service_attachment_resource (google.cloud.compute_v1.types.ServiceAttachment): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + service_attachment_resource = proto.Field( + proto.MESSAGE, + number=472980256, + message='ServiceAttachment', + ) + + +class InsertSslCertificateRequest(proto.Message): + r"""A request message for SslCertificates.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + ssl_certificate_resource (google.cloud.compute_v1.types.SslCertificate): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + ssl_certificate_resource = proto.Field( + proto.MESSAGE, + number=180709897, + message='SslCertificate', + ) + + +class InsertSslPolicyRequest(proto.Message): + r"""A request message for SslPolicies.Insert. See the method + description for details. 
+ + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + ssl_policy_resource (google.cloud.compute_v1.types.SslPolicy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + ssl_policy_resource = proto.Field( + proto.MESSAGE, + number=274891848, + message='SslPolicy', + ) + + +class InsertSubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + subnetwork_resource (google.cloud.compute_v1.types.Subnetwork): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + subnetwork_resource = proto.Field( + proto.MESSAGE, + number=42233151, + message='Subnetwork', + ) + + +class InsertTargetGrpcProxyRequest(proto.Message): + r"""A request message for TargetGrpcProxies.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ target_grpc_proxy_resource (google.cloud.compute_v1.types.TargetGrpcProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_grpc_proxy_resource = proto.Field( + proto.MESSAGE, + number=328922450, + message='TargetGrpcProxy', + ) + + +class InsertTargetHttpProxyRequest(proto.Message): + r"""A request message for TargetHttpProxies.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_http_proxy_resource (google.cloud.compute_v1.types.TargetHttpProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_http_proxy_resource = proto.Field( + proto.MESSAGE, + number=24696744, + message='TargetHttpProxy', + ) + + +class InsertTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. 
+ request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxy_resource (google.cloud.compute_v1.types.TargetHttpsProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy_resource = proto.Field( + proto.MESSAGE, + number=433657473, + message='TargetHttpsProxy', + ) + + +class InsertTargetInstanceRequest(proto.Message): + r"""A request message for TargetInstances.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_instance_resource (google.cloud.compute_v1.types.TargetInstance): + The body resource for this request + zone (str): + Name of the zone scoping this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_instance_resource = proto.Field( + proto.MESSAGE, + number=430453066, + message='TargetInstance', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class InsertTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ target_pool_resource (google.cloud.compute_v1.types.TargetPool): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_pool_resource = proto.Field( + proto.MESSAGE, + number=101281443, + message='TargetPool', + ) + + +class InsertTargetSslProxyRequest(proto.Message): + r"""A request message for TargetSslProxies.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_ssl_proxy_resource (google.cloud.compute_v1.types.TargetSslProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_ssl_proxy_resource = proto.Field( + proto.MESSAGE, + number=142016192, + message='TargetSslProxy', + ) + + +class InsertTargetTcpProxyRequest(proto.Message): + r"""A request message for TargetTcpProxies.Insert. See the method + description for details. 
+ + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_tcp_proxy_resource (google.cloud.compute_v1.types.TargetTcpProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_tcp_proxy_resource = proto.Field( + proto.MESSAGE, + number=145913931, + message='TargetTcpProxy', + ) + + +class InsertTargetVpnGatewayRequest(proto.Message): + r"""A request message for TargetVpnGateways.Insert. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_vpn_gateway_resource (google.cloud.compute_v1.types.TargetVpnGateway): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_vpn_gateway_resource = proto.Field( + proto.MESSAGE, + number=498050, + message='TargetVpnGateway', + ) + + +class InsertUrlMapRequest(proto.Message): + r"""A request message for UrlMaps.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + url_map_resource = proto.Field( + proto.MESSAGE, + number=168675425, + message='UrlMap', + ) + + +class InsertVpnGatewayRequest(proto.Message): + r"""A request message for VpnGateways.Insert. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + vpn_gateway_resource (google.cloud.compute_v1.types.VpnGateway): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + vpn_gateway_resource = proto.Field( + proto.MESSAGE, + number=182688660, + message='VpnGateway', + ) + + +class InsertVpnTunnelRequest(proto.Message): + r"""A request message for VpnTunnels.Insert. See the method + description for details. 
+ + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + vpn_tunnel_resource (google.cloud.compute_v1.types.VpnTunnel): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + vpn_tunnel_resource = proto.Field( + proto.MESSAGE, + number=86839482, + message='VpnTunnel', + ) + + +class Instance(proto.Message): + r"""Represents an Instance resource. An instance is a virtual + machine that is hosted on Google Cloud Platform. For more + information, read Virtual Machine Instances. + + Attributes: + advanced_machine_features (google.cloud.compute_v1.types.AdvancedMachineFeatures): + Controls for advanced machine-related + behavior features. + + This field is a member of `oneof`_ ``_advanced_machine_features``. + can_ip_forward (bool): + Allows this instance to send and receive + packets with non-matching destination or source + IPs. This is required if you plan to use this + instance to forward routes. 
For more + information, see Enabling IP Forwarding . + + This field is a member of `oneof`_ ``_can_ip_forward``. + confidential_instance_config (google.cloud.compute_v1.types.ConfidentialInstanceConfig): + + This field is a member of `oneof`_ ``_confidential_instance_config``. + cpu_platform (str): + [Output Only] The CPU platform used by this instance. + + This field is a member of `oneof`_ ``_cpu_platform``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + deletion_protection (bool): + Whether the resource should be protected + against deletion. + + This field is a member of `oneof`_ ``_deletion_protection``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + disks (Sequence[google.cloud.compute_v1.types.AttachedDisk]): + Array of disks associated with this instance. + Persistent disks must be created before you can + assign them. + display_device (google.cloud.compute_v1.types.DisplayDevice): + Enables display device for the instance. + + This field is a member of `oneof`_ ``_display_device``. + fingerprint (str): + Specifies a fingerprint for this resource, + which is essentially a hash of the instance's + contents and used for optimistic locking. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update the instance. You must always provide + an up-to-date fingerprint hash in order to + update the instance. To see the latest + fingerprint, make get() request to the instance. + + This field is a member of `oneof`_ ``_fingerprint``. + guest_accelerators (Sequence[google.cloud.compute_v1.types.AcceleratorConfig]): + A list of the type and count of accelerator + cards attached to the instance. + hostname (str): + Specifies the hostname of the instance. 
The specified + hostname must be RFC1035 compliant. If hostname is not + specified, the default hostname is + [INSTANCE_NAME].c.[PROJECT_ID].internal when using the + global DNS, and + [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal when using + zonal DNS. + + This field is a member of `oneof`_ ``_hostname``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#instance + for instances. + + This field is a member of `oneof`_ ``_kind``. + label_fingerprint (str): + A fingerprint for this request, which is + essentially a hash of the label's contents and + used for optimistic locking. The fingerprint is + initially generated by Compute Engine and + changes after every request to modify or update + labels. You must always provide an up-to-date + fingerprint hash in order to update or change + labels. To see the latest fingerprint, make + get() request to the instance. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.Instance.LabelsEntry]): + Labels to apply to this instance. These can + be later modified by the setLabels method. + last_start_timestamp (str): + [Output Only] Last start timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_last_start_timestamp``. + last_stop_timestamp (str): + [Output Only] Last stop timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_last_stop_timestamp``. + last_suspended_timestamp (str): + [Output Only] Last suspended timestamp in RFC3339 text + format. + + This field is a member of `oneof`_ ``_last_suspended_timestamp``. + machine_type (str): + Full or partial URL of the machine type + resource to use for this instance, in the + format: zones/zone/machineTypes/machine-type. + This is provided by the client when the instance + is created. 
For example, the following is a + valid partial url to a predefined machine type: + zones/us-central1-f/machineTypes/n1-standard-1 + To create a custom machine type, provide a URL + to a machine type in the following format, where + CPUS is 1 or an even number up to 32 (2, 4, 6, + ... 24, etc), and MEMORY is the total memory for + this instance. Memory must be a multiple of 256 + MB and must be supplied in MB (e.g. 5 GB of + memory is 5120 MB): + zones/zone/machineTypes/custom-CPUS-MEMORY For + example: zones/us- + central1-f/machineTypes/custom-4-5120 For a full + list of restrictions, read the Specifications + for custom machine types. + + This field is a member of `oneof`_ ``_machine_type``. + metadata (google.cloud.compute_v1.types.Metadata): + The metadata key/value pairs assigned to this + instance. This includes custom metadata and + predefined keys. + + This field is a member of `oneof`_ ``_metadata``. + min_cpu_platform (str): + Specifies a minimum CPU platform for the VM + instance. Applicable values are the friendly + names of CPU platforms, such as minCpuPlatform: + "Intel Haswell" or minCpuPlatform: "Intel Sandy + Bridge". + + This field is a member of `oneof`_ ``_min_cpu_platform``. + name (str): + The name of the resource, provided by the client when + initially creating the resource. The resource name must be + 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network_interfaces (Sequence[google.cloud.compute_v1.types.NetworkInterface]): + An array of network configurations for this + instance. 
These specify how interfaces are + configured to interact with other network + services, such as connecting to the internet. + Multiple interfaces are supported per instance. + private_ipv6_google_access (str): + The private IPv6 google access type for the VM. If not + specified, use INHERIT_FROM_SUBNETWORK as default. Check the + PrivateIpv6GoogleAccess enum for the list of possible + values. + + This field is a member of `oneof`_ ``_private_ipv6_google_access``. + reservation_affinity (google.cloud.compute_v1.types.ReservationAffinity): + Specifies the reservations that this instance + can consume from. + + This field is a member of `oneof`_ ``_reservation_affinity``. + resource_policies (Sequence[str]): + Resource policies applied to this instance. + satisfies_pzs (bool): + [Output Only] Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzs``. + scheduling (google.cloud.compute_v1.types.Scheduling): + Sets the scheduling options for this + instance. + + This field is a member of `oneof`_ ``_scheduling``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + service_accounts (Sequence[google.cloud.compute_v1.types.ServiceAccount]): + A list of service accounts, with their + specified scopes, authorized for this instance. + Only one service account per VM instance is + supported. Service accounts generate access + tokens that can be accessed through the metadata + server and used to authenticate applications on + the instance. See Service Accounts for more + information. + shielded_instance_config (google.cloud.compute_v1.types.ShieldedInstanceConfig): + + This field is a member of `oneof`_ ``_shielded_instance_config``. + shielded_instance_integrity_policy (google.cloud.compute_v1.types.ShieldedInstanceIntegrityPolicy): + + This field is a member of `oneof`_ ``_shielded_instance_integrity_policy``. 
+ start_restricted (bool): + [Output Only] Whether a VM has been restricted for start + because Compute Engine has detected suspicious activity. + + This field is a member of `oneof`_ ``_start_restricted``. + status (str): + [Output Only] The status of the instance. One of the + following values: PROVISIONING, STAGING, RUNNING, STOPPING, + SUSPENDING, SUSPENDED, REPAIRING, and TERMINATED. For more + information about the status of the instance, see Instance + life cycle. Check the Status enum for the list of possible + values. + + This field is a member of `oneof`_ ``_status``. + status_message (str): + [Output Only] An optional, human-readable explanation of the + status. + + This field is a member of `oneof`_ ``_status_message``. + tags (google.cloud.compute_v1.types.Tags): + Tags to apply to this instance. Tags are used + to identify valid sources or targets for network + firewalls and are specified by the client during + instance creation. The tags can be later + modified by the setTags method. Each tag within + the list must comply with RFC1035. Multiple tags + can be specified via the 'tags.items' field. + + This field is a member of `oneof`_ ``_tags``. + zone (str): + [Output Only] URL of the zone where the instance resides. + You must specify this field as part of the HTTP request URL. + It is not settable as a field in the request body. + + This field is a member of `oneof`_ ``_zone``. + """ + class PrivateIpv6GoogleAccess(proto.Enum): + r"""The private IPv6 google access type for the VM. If not specified, + use INHERIT_FROM_SUBNETWORK as default. + """ + UNDEFINED_PRIVATE_IPV6_GOOGLE_ACCESS = 0 + ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE = 427975994 + ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE = 288210263 + INHERIT_FROM_SUBNETWORK = 530256959 + + class Status(proto.Enum): + r"""[Output Only] The status of the instance. One of the following + values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, + SUSPENDED, REPAIRING, and TERMINATED. 
For more information about the + status of the instance, see Instance life cycle. + """ + UNDEFINED_STATUS = 0 + DEPROVISIONING = 428935662 + PROVISIONING = 290896621 + REPAIRING = 413483285 + RUNNING = 121282975 + STAGING = 431072283 + STOPPED = 444276141 + STOPPING = 350791796 + SUSPENDED = 51223995 + SUSPENDING = 514206246 + TERMINATED = 250018339 + + advanced_machine_features = proto.Field( + proto.MESSAGE, + number=409646002, + optional=True, + message='AdvancedMachineFeatures', + ) + can_ip_forward = proto.Field( + proto.BOOL, + number=467731324, + optional=True, + ) + confidential_instance_config = proto.Field( + proto.MESSAGE, + number=490637685, + optional=True, + message='ConfidentialInstanceConfig', + ) + cpu_platform = proto.Field( + proto.STRING, + number=410285354, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + deletion_protection = proto.Field( + proto.BOOL, + number=458014698, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disks = proto.RepeatedField( + proto.MESSAGE, + number=95594102, + message='AttachedDisk', + ) + display_device = proto.Field( + proto.MESSAGE, + number=258933875, + optional=True, + message='DisplayDevice', + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + guest_accelerators = proto.RepeatedField( + proto.MESSAGE, + number=463595119, + message='AcceleratorConfig', + ) + hostname = proto.Field( + proto.STRING, + number=237067315, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + last_start_timestamp = proto.Field( + proto.STRING, + number=443830736, + optional=True, + ) 
+ last_stop_timestamp = proto.Field( + proto.STRING, + number=412823010, + optional=True, + ) + last_suspended_timestamp = proto.Field( + proto.STRING, + number=356275337, + optional=True, + ) + machine_type = proto.Field( + proto.STRING, + number=227711026, + optional=True, + ) + metadata = proto.Field( + proto.MESSAGE, + number=86866735, + optional=True, + message='Metadata', + ) + min_cpu_platform = proto.Field( + proto.STRING, + number=242912759, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network_interfaces = proto.RepeatedField( + proto.MESSAGE, + number=52735243, + message='NetworkInterface', + ) + private_ipv6_google_access = proto.Field( + proto.STRING, + number=48277006, + optional=True, + ) + reservation_affinity = proto.Field( + proto.MESSAGE, + number=157850683, + optional=True, + message='ReservationAffinity', + ) + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + scheduling = proto.Field( + proto.MESSAGE, + number=386688404, + optional=True, + message='Scheduling', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + service_accounts = proto.RepeatedField( + proto.MESSAGE, + number=277537328, + message='ServiceAccount', + ) + shielded_instance_config = proto.Field( + proto.MESSAGE, + number=12862901, + optional=True, + message='ShieldedInstanceConfig', + ) + shielded_instance_integrity_policy = proto.Field( + proto.MESSAGE, + number=163696919, + optional=True, + message='ShieldedInstanceIntegrityPolicy', + ) + start_restricted = proto.Field( + proto.BOOL, + number=123693144, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + status_message = proto.Field( + proto.STRING, + number=297428154, + optional=True, + ) + tags = proto.Field( + proto.MESSAGE, + number=3552281, + optional=True, + 
message='Tags', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class InstanceAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InstanceAggregatedList.ItemsEntry]): + An object that contains a list of instances + scoped by zone. + kind (str): + [Output Only] Type of resource. Always + compute#instanceAggregatedList for aggregated lists of + Instance resources. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='InstancesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroup(proto.Message): + r"""Represents an Instance Group resource. Instance Groups can be + used to configure a target for load balancing. Instance groups + can either be managed or unmanaged. To create managed instance + groups, use the instanceGroupManager or + regionInstanceGroupManager resource instead. Use zonal unmanaged + instance groups if you need to apply load balancing to groups of + heterogeneous instances or if you need to manage the instances + yourself. You cannot create regional unmanaged instance groups. + For more information, read Instance groups. + + Attributes: + creation_timestamp (str): + [Output Only] The creation timestamp for this instance group + in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + [Output Only] The fingerprint of the named ports. The system + uses this fingerprint to detect conflicts when multiple + users change the named ports concurrently. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] A unique identifier for this instance group, + generated by the server. 
+ + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] The resource type, which is always + compute#instanceGroup for instance groups. + + This field is a member of `oneof`_ ``_kind``. + name (str): + The name of the instance group. The name must + be 1-63 characters long, and comply with + RFC1035. + + This field is a member of `oneof`_ ``_name``. + named_ports (Sequence[google.cloud.compute_v1.types.NamedPort]): + Assigns a name to a port number. For example: {name: "http", + port: 80} This allows the system to reference ports by the + assigned name instead of a port number. Named ports can also + contain multiple ports. For example: [{name: "http", port: + 80},{name: "http", port: 8080}] Named ports apply to all + instances in this instance group. + network (str): + [Output Only] The URL of the network to which all instances + in the instance group belong. If your instance has multiple + network interfaces, then the network and subnetwork fields + only refer to the network and subnet used by your primary + interface (nic0). + + This field is a member of `oneof`_ ``_network``. + region (str): + [Output Only] The URL of the region where the instance group + is located (for regional resources). + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] The URL for this instance group. The server + generates this URL. + + This field is a member of `oneof`_ ``_self_link``. + size (int): + [Output Only] The total number of instances in the instance + group. + + This field is a member of `oneof`_ ``_size``. + subnetwork (str): + [Output Only] The URL of the subnetwork to which all + instances in the instance group belong. If your instance has + multiple network interfaces, then the network and subnetwork + fields only refer to the network and subnet used by your + primary interface (nic0). + + This field is a member of `oneof`_ ``_subnetwork``. 
+ zone (str): + [Output Only] The URL of the zone where the instance group + is located (for zonal resources). + + This field is a member of `oneof`_ ``_zone``. + """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + named_ports = proto.RepeatedField( + proto.MESSAGE, + number=427598732, + message='NamedPort', + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + size = proto.Field( + proto.INT32, + number=3530753, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class InstanceGroupAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InstanceGroupAggregatedList.ItemsEntry]): + A list of InstanceGroupsScopedList resources. + kind (str): + [Output Only] The resource type, which is always + compute#instanceGroupAggregatedList for aggregated lists of + instance groups. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. 
If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='InstanceGroupsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupList(proto.Message): + r"""A list of InstanceGroup resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InstanceGroup]): + A list of InstanceGroup resources. + kind (str): + [Output Only] The resource type, which is always + compute#instanceGroupList for instance group lists. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. 
If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceGroup', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupManager(proto.Message): + r"""Represents a Managed Instance Group resource. An instance + group is a collection of VM instances that you can manage as a + single entity. For more information, read Instance groups. For + zonal Managed Instance Group, use the instanceGroupManagers + resource. For regional Managed Instance Group, use the + regionInstanceGroupManagers resource. + + Attributes: + auto_healing_policies (Sequence[google.cloud.compute_v1.types.InstanceGroupManagerAutoHealingPolicy]): + The autohealing policy for this managed + instance group. You can specify only one value. + base_instance_name (str): + The base instance name to use for instances + in this group. The value must be 1-58 characters + long. 
Instances are named by appending a hyphen + and a random four-character string to the base + instance name. The base instance name must + comply with RFC1035. + + This field is a member of `oneof`_ ``_base_instance_name``. + creation_timestamp (str): + [Output Only] The creation timestamp for this managed + instance group in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + current_actions (google.cloud.compute_v1.types.InstanceGroupManagerActionsSummary): + [Output Only] The list of instance actions and the number of + instances in this managed instance group that are scheduled + for each of those actions. + + This field is a member of `oneof`_ ``_current_actions``. + description (str): + An optional description of this resource. + + This field is a member of `oneof`_ ``_description``. + distribution_policy (google.cloud.compute_v1.types.DistributionPolicy): + Policy specifying the intended distribution + of managed instances across zones in a regional + managed instance group. + + This field is a member of `oneof`_ ``_distribution_policy``. + fingerprint (str): + Fingerprint of this resource. This field may + be used in optimistic locking. It will be + ignored when inserting an InstanceGroupManager. + An up-to-date fingerprint must be provided in + order to update the InstanceGroupManager, + otherwise the request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve an + InstanceGroupManager. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] A unique identifier for this resource type. + The server generates this identifier. + + This field is a member of `oneof`_ ``_id``. + instance_group (str): + [Output Only] The URL of the Instance Group resource. + + This field is a member of `oneof`_ ``_instance_group``. + instance_template (str): + The URL of the instance template that is + specified for this managed instance group. 
Stateful configuration for this Instance + Group Manager
+ Resizing the group also changes this number. + + This field is a member of `oneof`_ ``_target_size``. + update_policy (google.cloud.compute_v1.types.InstanceGroupManagerUpdatePolicy): + The update policy for this managed instance + group. + + This field is a member of `oneof`_ ``_update_policy``. + versions (Sequence[google.cloud.compute_v1.types.InstanceGroupManagerVersion]): + Specifies the instance templates used by this + managed instance group to create instances. Each + version is defined by an instanceTemplate and a + name. Every version can appear at most once per + instance group. This field overrides the top- + level instanceTemplate field. Read more about + the relationships between these fields. Exactly + one version must leave the targetSize field + unset. That version will be applied to all + remaining instances. For more information, read + about canary updates. + zone (str): + [Output Only] The URL of a zone where the managed instance + group is located (for zonal resources). + + This field is a member of `oneof`_ ``_zone``. 
+ """ + + auto_healing_policies = proto.RepeatedField( + proto.MESSAGE, + number=456799109, + message='InstanceGroupManagerAutoHealingPolicy', + ) + base_instance_name = proto.Field( + proto.STRING, + number=389106439, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + current_actions = proto.Field( + proto.MESSAGE, + number=164045879, + optional=True, + message='InstanceGroupManagerActionsSummary', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + distribution_policy = proto.Field( + proto.MESSAGE, + number=534558541, + optional=True, + message='DistributionPolicy', + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + instance_group = proto.Field( + proto.STRING, + number=81095253, + optional=True, + ) + instance_template = proto.Field( + proto.STRING, + number=309248228, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + named_ports = proto.RepeatedField( + proto.MESSAGE, + number=427598732, + message='NamedPort', + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + stateful_policy = proto.Field( + proto.MESSAGE, + number=47538565, + optional=True, + message='StatefulPolicy', + ) + status = proto.Field( + proto.MESSAGE, + number=181260274, + optional=True, + message='InstanceGroupManagerStatus', + ) + target_pools = proto.RepeatedField( + proto.STRING, + number=336072617, + ) + target_size = proto.Field( + proto.INT32, + number=62880239, + optional=True, + ) + update_policy = proto.Field( + proto.MESSAGE, + number=175809896, + optional=True, + message='InstanceGroupManagerUpdatePolicy', + ) + versions = 
instance group that are scheduled to be recreated or are + currently being recreated.
Recreating an instance + deletes the existing root persistent disk and creates a new + disk from the image that is defined in the instance + template. + + This field is a member of `oneof`_ ``_recreating``. + refreshing (int): + [Output Only] The number of instances in the managed + instance group that are being reconfigured with properties + that do not require a restart or a recreate action. For + example, setting or removing target pools for the instance. + + This field is a member of `oneof`_ ``_refreshing``. + restarting (int): + [Output Only] The number of instances in the managed + instance group that are scheduled to be restarted or are + currently being restarted. + + This field is a member of `oneof`_ ``_restarting``. + verifying (int): + [Output Only] The number of instances in the managed + instance group that are being verified. See the + managedInstances[].currentAction property in the + listManagedInstances method documentation. + + This field is a member of `oneof`_ ``_verifying``. + """ + + abandoning = proto.Field( + proto.INT32, + number=440023373, + optional=True, + ) + creating = proto.Field( + proto.INT32, + number=209809081, + optional=True, + ) + creating_without_retries = proto.Field( + proto.INT32, + number=369916745, + optional=True, + ) + deleting = proto.Field( + proto.INT32, + number=282846120, + optional=True, + ) + none = proto.Field( + proto.INT32, + number=3387192, + optional=True, + ) + recreating = proto.Field( + proto.INT32, + number=339057132, + optional=True, + ) + refreshing = proto.Field( + proto.INT32, + number=215044903, + optional=True, + ) + restarting = proto.Field( + proto.INT32, + number=372312947, + optional=True, + ) + verifying = proto.Field( + proto.INT32, + number=451612873, + optional=True, + ) + + +class InstanceGroupManagerAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.InstanceGroupManagerAggregatedList.ItemsEntry]): + A list of InstanceGroupManagersScopedList + resources. + kind (str): + [Output Only] The resource type, which is always + compute#instanceGroupManagerAggregatedList for an aggregated + list of managed instance groups. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='InstanceGroupManagersScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupManagerAutoHealingPolicy(proto.Message): + r""" + + Attributes: + health_check (str): + The URL for the health check that signals + autohealing. + + This field is a member of `oneof`_ ``_health_check``. + initial_delay_sec (int): + The number of seconds that the managed instance group waits + before it applies autohealing policies to new instances or + recently recreated instances. This initial delay allows + instances to initialize and run their startup scripts before + the instance group determines that they are UNHEALTHY. This + prevents the managed instance group from recreating its + instances prematurely. This value must be from range [0, + 3600]. + + This field is a member of `oneof`_ ``_initial_delay_sec``. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + optional=True, + ) + initial_delay_sec = proto.Field( + proto.INT32, + number=263207002, + optional=True, + ) + + +class InstanceGroupManagerList(proto.Message): + r"""[Output Only] A list of managed instance groups. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InstanceGroupManager]): + A list of InstanceGroupManager resources. 
+ kind (str): + [Output Only] The resource type, which is always + compute#instanceGroupManagerList for a list of managed + instance groups. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceGroupManager', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupManagerStatus(proto.Message): + r""" + + Attributes: + autoscaler (str): + [Output Only] The URL of the Autoscaler that targets this + instance group manager. + + This field is a member of `oneof`_ ``_autoscaler``. + is_stable (bool): + [Output Only] A bit indicating whether the managed instance + group is in a stable state. 
A stable state means that: none + of the instances in the managed instance group is currently + undergoing any type of change (for example, creation, + restart, or deletion); no future changes are scheduled for + instances in the managed instance group; and the managed + instance group itself is not being modified. + + This field is a member of `oneof`_ ``_is_stable``. + stateful (google.cloud.compute_v1.types.InstanceGroupManagerStatusStateful): + [Output Only] Stateful status of the given Instance Group + Manager. + + This field is a member of `oneof`_ ``_stateful``. + version_target (google.cloud.compute_v1.types.InstanceGroupManagerStatusVersionTarget): + [Output Only] A status of consistency of Instances' versions + with their target version specified by version field on + Instance Group Manager. + + This field is a member of `oneof`_ ``_version_target``. + """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + optional=True, + ) + is_stable = proto.Field( + proto.BOOL, + number=108410864, + optional=True, + ) + stateful = proto.Field( + proto.MESSAGE, + number=244462412, + optional=True, + message='InstanceGroupManagerStatusStateful', + ) + version_target = proto.Field( + proto.MESSAGE, + number=289386200, + optional=True, + message='InstanceGroupManagerStatusVersionTarget', + ) + + +class InstanceGroupManagerStatusStateful(proto.Message): + r""" + + Attributes: + has_stateful_config (bool): + [Output Only] A bit indicating whether the managed instance + group has stateful configuration, that is, if you have + configured any items in a stateful policy or in per-instance + configs. The group might report that it has no stateful + config even when there is still some preserved state on a + managed instance, for example, if you have deleted all PICs + but not yet applied those deletions. + + This field is a member of `oneof`_ ``_has_stateful_config``. 
A bit indicating if all of the group's per- + instance configs (listed in the output of a + listPerInstanceConfigs API call)
+ max_surge (google.cloud.compute_v1.types.FixedOrPercent): + The maximum number of instances that can be + created above the specified targetSize during + the update process. This value can be either a + fixed number or, if the group has 10 or more + instances, a percentage. If you set a + percentage, the number of instances is rounded + if necessary. The default value for maxSurge is + a fixed value equal to the number of zones in + which the managed instance group operates. At + least one of either maxSurge or maxUnavailable + must be greater than 0. Learn more about + maxSurge. + + This field is a member of `oneof`_ ``_max_surge``. + max_unavailable (google.cloud.compute_v1.types.FixedOrPercent): + The maximum number of instances that can be + unavailable during the update process. An + instance is considered available if all of the + following conditions are satisfied: - The + instance's status is RUNNING. - If there is a + health check on the instance group, the + instance's health check status must be HEALTHY + at least once. If there is no health check on + the group, then the instance only needs to have + a status of RUNNING to be considered available. + This value can be either a fixed number or, if + the group has 10 or more instances, a + percentage. If you set a percentage, the number + of instances is rounded if necessary. The + default value for maxUnavailable is a fixed + value equal to the number of zones in which the + managed instance group operates. At least one of + either maxSurge or maxUnavailable must be + greater than 0. Learn more about maxUnavailable. + + This field is a member of `oneof`_ ``_max_unavailable``. + minimal_action (str): + Minimal action to be taken on an instance. + You can specify either RESTART to restart + existing instances or REPLACE to delete and + create new instances from the target template. + If you specify a RESTART, the Updater will + attempt to perform that action only. 
However, if + the Updater determines that the minimal action + you specify is not enough to perform the update, + it might perform a more disruptive action. + + This field is a member of `oneof`_ ``_minimal_action``. + replacement_method (str): + What action should be used to replace instances. See + minimal_action.REPLACE + + This field is a member of `oneof`_ ``_replacement_method``. + type_ (str): + The type of update process. You can specify + either PROACTIVE so that the instance group + manager proactively executes actions in order to + bring instances to their target versions or + OPPORTUNISTIC so that no action is proactively + executed but the update will be performed as + part of other actions (for example, resizes or + recreateInstances calls). + + This field is a member of `oneof`_ ``_type``. + """ + + instance_redistribution_type = proto.Field( + proto.STRING, + number=292630424, + optional=True, + ) + max_surge = proto.Field( + proto.MESSAGE, + number=302572691, + optional=True, + message='FixedOrPercent', + ) + max_unavailable = proto.Field( + proto.MESSAGE, + number=404940277, + optional=True, + message='FixedOrPercent', + ) + minimal_action = proto.Field( + proto.STRING, + number=270567060, + optional=True, + ) + replacement_method = proto.Field( + proto.STRING, + number=505931694, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class InstanceGroupManagerVersion(proto.Message): + r""" + + Attributes: + instance_template (str): + The URL of the instance template that is specified for this + managed instance group. The group uses this template to + create new instances in the managed instance group until the + ``targetSize`` for this version is reached. 
The templates + for existing instances in the group do not change unless you + run recreateInstances, run applyUpdatesToInstances, or set + the group's updatePolicy.type to PROACTIVE; in those cases, + existing instances are updated until the ``targetSize`` for + this version is reached. + + This field is a member of `oneof`_ ``_instance_template``. + name (str): + Name of the version. Unique among all + versions in the scope of this managed instance + group. + + This field is a member of `oneof`_ ``_name``. + target_size (google.cloud.compute_v1.types.FixedOrPercent): + Specifies the intended number of instances to be created + from the instanceTemplate. The final number of instances + created from the template will be equal to: - If expressed + as a fixed number, the minimum of either targetSize.fixed or + instanceGroupManager.targetSize is used. - if expressed as a + percent, the targetSize would be (targetSize.percent/100 \* + InstanceGroupManager.targetSize) If there is a remainder, + the number is rounded. If unset, this version will update + any remaining instances not updated by another version. Read + Starting a canary update for more information. + + This field is a member of `oneof`_ ``_target_size``. + """ + + instance_template = proto.Field( + proto.STRING, + number=309248228, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + target_size = proto.Field( + proto.MESSAGE, + number=62880239, + optional=True, + message='FixedOrPercent', + ) + + +class InstanceGroupManagersAbandonInstancesRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[str]): + The URLs of one or more instances to abandon. This can be a + full URL or a partial URL, such as + zones/[ZONE]/instances/[INSTANCE_NAME]. 
+ """ + + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + + +class InstanceGroupManagersApplyUpdatesRequest(proto.Message): + r"""InstanceGroupManagers.applyUpdatesToInstances + + Attributes: + all_instances (bool): + Flag to update all instances instead of + specified list of ���instances���. If the flag + is set to true then the instances may not be + specified in the request. + + This field is a member of `oneof`_ ``_all_instances``. + instances (Sequence[str]): + The list of URLs of one or more instances for which you want + to apply updates. Each URL can be a full URL or a partial + URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. + minimal_action (str): + The minimal action that you want to perform + on each instance during the update: - REPLACE: + At minimum, delete the instance and create it + again. - RESTART: Stop the instance and start it + again. - REFRESH: Do not stop the instance. - + NONE: Do not disrupt the instance at all. By + default, the minimum action is NONE. If your + update requires a more disruptive action than + you set with this flag, the necessary action is + performed to execute the update. + + This field is a member of `oneof`_ ``_minimal_action``. + most_disruptive_allowed_action (str): + The most disruptive action that you want to + perform on each instance during the update: - + REPLACE: Delete the instance and create it + again. - RESTART: Stop the instance and start it + again. - REFRESH: Do not stop the instance. - + NONE: Do not disrupt the instance at all. By + default, the most disruptive allowed action is + REPLACE. If your update requires a more + disruptive action than you set with this flag, + the update request will fail. + + This field is a member of `oneof`_ ``_most_disruptive_allowed_action``. 
+ """ + + all_instances = proto.Field( + proto.BOOL, + number=403676512, + optional=True, + ) + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + minimal_action = proto.Field( + proto.STRING, + number=270567060, + optional=True, + ) + most_disruptive_allowed_action = proto.Field( + proto.STRING, + number=66103053, + optional=True, + ) + + +class InstanceGroupManagersCreateInstancesRequest(proto.Message): + r"""InstanceGroupManagers.createInstances + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + [Required] List of specifications of per-instance configs. + """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='PerInstanceConfig', + ) + + +class InstanceGroupManagersDeleteInstancesRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[str]): + The URLs of one or more instances to delete. This can be a + full URL or a partial URL, such as + zones/[ZONE]/instances/[INSTANCE_NAME]. + skip_instances_on_validation_error (bool): + Specifies whether the request should proceed despite the + inclusion of instances that are not members of the group or + that are already in the process of being deleted or + abandoned. If this field is set to ``false`` and such an + instance is specified in the request, the operation fails. + The operation always fails if the request contains a + malformed instance URL or a reference to an instance that + exists in a zone or region other than the group's zone or + region. + + This field is a member of `oneof`_ ``_skip_instances_on_validation_error``. 
+ """ + + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + skip_instances_on_validation_error = proto.Field( + proto.BOOL, + number=40631073, + optional=True, + ) + + +class InstanceGroupManagersDeletePerInstanceConfigsReq(proto.Message): + r"""InstanceGroupManagers.deletePerInstanceConfigs + + Attributes: + names (Sequence[str]): + The list of instance names for which we want + to delete per-instance configs on this managed + instance group. + """ + + names = proto.RepeatedField( + proto.STRING, + number=104585032, + ) + + +class InstanceGroupManagersListErrorsResponse(proto.Message): + r""" + + Attributes: + items (Sequence[google.cloud.compute_v1.types.InstanceManagedByIgmError]): + [Output Only] The list of errors of the managed instance + group. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + """ + + @property + def raw_page(self): + return self + + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceManagedByIgmError', + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + + +class InstanceGroupManagersListManagedInstancesResponse(proto.Message): + r""" + + Attributes: + managed_instances (Sequence[google.cloud.compute_v1.types.ManagedInstance]): + [Output Only] The list of instances in the managed instance + group. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + """ + + @property + def raw_page(self): + return self + + managed_instances = proto.RepeatedField( + proto.MESSAGE, + number=336219614, + message='ManagedInstance', + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + + +class InstanceGroupManagersListPerInstanceConfigsResp(proto.Message): + r""" + + Attributes: + items (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + [Output Only] The list of PerInstanceConfig. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='PerInstanceConfig', + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupManagersPatchPerInstanceConfigsReq(proto.Message): + r"""InstanceGroupManagers.patchPerInstanceConfigs + + Attributes: + per_instance_configs (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + The list of per-instance configs to insert or + patch on this managed instance group. 
+ """ + + per_instance_configs = proto.RepeatedField( + proto.MESSAGE, + number=526265001, + message='PerInstanceConfig', + ) + + +class InstanceGroupManagersRecreateInstancesRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[str]): + The URLs of one or more instances to recreate. This can be a + full URL or a partial URL, such as + zones/[ZONE]/instances/[INSTANCE_NAME]. + """ + + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + + +class InstanceGroupManagersScopedList(proto.Message): + r""" + + Attributes: + instance_group_managers (Sequence[google.cloud.compute_v1.types.InstanceGroupManager]): + [Output Only] The list of managed instance groups that are + contained in the specified project and zone. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] The warning that replaces the list of managed + instance groups when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + instance_group_managers = proto.RepeatedField( + proto.MESSAGE, + number=214072592, + message='InstanceGroupManager', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupManagersSetInstanceTemplateRequest(proto.Message): + r""" + + Attributes: + instance_template (str): + The URL of the instance template that is + specified for this managed instance group. The + group uses this template to create all new + instances in the managed instance group. The + templates for existing instances in the group do + not change unless you run recreateInstances, run + applyUpdatesToInstances, or set the group's + updatePolicy.type to PROACTIVE. + + This field is a member of `oneof`_ ``_instance_template``. 
+ """ + + instance_template = proto.Field( + proto.STRING, + number=309248228, + optional=True, + ) + + +class InstanceGroupManagersSetTargetPoolsRequest(proto.Message): + r""" + + Attributes: + fingerprint (str): + The fingerprint of the target pools + information. Use this optional property to + prevent conflicts when multiple users change the + target pools settings concurrently. Obtain the + fingerprint with the instanceGroupManagers.get + method. Then, include the fingerprint in your + request to ensure that you do not overwrite + changes that were applied from another + concurrent request. + + This field is a member of `oneof`_ ``_fingerprint``. + target_pools (Sequence[str]): + The list of target pool URLs that instances + in this managed instance group belong to. The + managed instance group applies these target + pools to all of the instances in the group. + Existing instances and new instances in the + group all receive these target pool settings. + """ + + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + target_pools = proto.RepeatedField( + proto.STRING, + number=336072617, + ) + + +class InstanceGroupManagersUpdatePerInstanceConfigsReq(proto.Message): + r"""InstanceGroupManagers.updatePerInstanceConfigs + + Attributes: + per_instance_configs (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + The list of per-instance configs to insert or + patch on this managed instance group. + """ + + per_instance_configs = proto.RepeatedField( + proto.MESSAGE, + number=526265001, + message='PerInstanceConfig', + ) + + +class InstanceGroupsAddInstancesRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.InstanceReference]): + The list of instances to add to the instance + group. 
+ """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='InstanceReference', + ) + + +class InstanceGroupsListInstances(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InstanceWithNamedPorts]): + A list of InstanceWithNamedPorts resources. + kind (str): + [Output Only] The resource type, which is always + compute#instanceGroupsListInstances for the list of + instances in the specified instance group. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceWithNamedPorts', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupsListInstancesRequest(proto.Message): + r""" + + Attributes: + instance_state (str): + A filter for the state of the instances in + the instance group. Valid options are ALL or + RUNNING. If you do not specify this parameter + the list includes all instances regardless of + their state. Check the InstanceState enum for + the list of possible values. + + This field is a member of `oneof`_ ``_instance_state``. + """ + class InstanceState(proto.Enum): + r"""A filter for the state of the instances in the instance + group. Valid options are ALL or RUNNING. If you do not specify + this parameter the list includes all instances regardless of + their state. + """ + UNDEFINED_INSTANCE_STATE = 0 + ALL = 64897 + RUNNING = 121282975 + + instance_state = proto.Field( + proto.STRING, + number=92223591, + optional=True, + ) + + +class InstanceGroupsRemoveInstancesRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.InstanceReference]): + The list of instances to remove from the + instance group. 
+ """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='InstanceReference', + ) + + +class InstanceGroupsScopedList(proto.Message): + r""" + + Attributes: + instance_groups (Sequence[google.cloud.compute_v1.types.InstanceGroup]): + [Output Only] The list of instance groups that are contained + in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] An informational warning that replaces the + list of instance groups when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + instance_groups = proto.RepeatedField( + proto.MESSAGE, + number=366469310, + message='InstanceGroup', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceGroupsSetNamedPortsRequest(proto.Message): + r""" + + Attributes: + fingerprint (str): + The fingerprint of the named ports + information for this instance group. Use this + optional property to prevent conflicts when + multiple users change the named ports settings + concurrently. Obtain the fingerprint with the + instanceGroups.get method. Then, include the + fingerprint in your request to ensure that you + do not overwrite changes that were applied from + another concurrent request. A request with an + incorrect fingerprint will fail with error 412 + conditionNotMet. + + This field is a member of `oneof`_ ``_fingerprint``. + named_ports (Sequence[google.cloud.compute_v1.types.NamedPort]): + The list of named ports to set for this + instance group. + """ + + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + named_ports = proto.RepeatedField( + proto.MESSAGE, + number=427598732, + message='NamedPort', + ) + + +class InstanceList(proto.Message): + r"""Contains a list of instances. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.Instance]): + A list of Instance resources. + kind (str): + [Output Only] Type of resource. Always compute#instanceList + for lists of Instance resources. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Instance', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceListReferrers(proto.Message): + r"""Contains a list of instance referrers. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Reference]): + A list of Reference resources. + kind (str): + [Output Only] Type of resource. 
Always + compute#instanceListReferrers for lists of Instance + referrers. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Reference', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceManagedByIgmError(proto.Message): + r""" + + Attributes: + error (google.cloud.compute_v1.types.InstanceManagedByIgmErrorManagedInstanceError): + [Output Only] Contents of the error. + + This field is a member of `oneof`_ ``_error``. + instance_action_details (google.cloud.compute_v1.types.InstanceManagedByIgmErrorInstanceActionDetails): + [Output Only] Details of the instance action that triggered + this error. May be null, if the error was not caused by an + action on an instance. This field is optional. 
+ + This field is a member of `oneof`_ ``_instance_action_details``. + timestamp (str): + [Output Only] The time that this error occurred. This value + is in RFC3339 text format. + + This field is a member of `oneof`_ ``_timestamp``. + """ + + error = proto.Field( + proto.MESSAGE, + number=96784904, + optional=True, + message='InstanceManagedByIgmErrorManagedInstanceError', + ) + instance_action_details = proto.Field( + proto.MESSAGE, + number=292224547, + optional=True, + message='InstanceManagedByIgmErrorInstanceActionDetails', + ) + timestamp = proto.Field( + proto.STRING, + number=55126294, + optional=True, + ) + + +class InstanceManagedByIgmErrorInstanceActionDetails(proto.Message): + r""" + + Attributes: + action (str): + [Output Only] Action that managed instance group was + executing on the instance when the error occurred. Possible + values: Check the Action enum for the list of possible + values. + + This field is a member of `oneof`_ ``_action``. + instance (str): + [Output Only] The URL of the instance. The URL can be set + even if the instance has not yet been created. + + This field is a member of `oneof`_ ``_instance``. + version (google.cloud.compute_v1.types.ManagedInstanceVersion): + [Output Only] Version this instance was created from, or was + being created from, but the creation failed. Corresponds to + one of the versions that were set on the Instance Group + Manager resource at the time this instance was being + created. + + This field is a member of `oneof`_ ``_version``. + """ + class Action(proto.Enum): + r"""[Output Only] Action that managed instance group was executing on + the instance when the error occurred. 
Possible values: + """ + UNDEFINED_ACTION = 0 + ABANDONING = 388244813 + CREATING = 455564985 + CREATING_WITHOUT_RETRIES = 428843785 + DELETING = 528602024 + NONE = 2402104 + RECREATING = 287278572 + REFRESHING = 163266343 + RESTARTING = 320534387 + VERIFYING = 16982185 + + action = proto.Field( + proto.STRING, + number=187661878, + optional=True, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + optional=True, + ) + version = proto.Field( + proto.MESSAGE, + number=351608024, + optional=True, + message='ManagedInstanceVersion', + ) + + +class InstanceManagedByIgmErrorManagedInstanceError(proto.Message): + r""" + + Attributes: + code (str): + [Output Only] Error code. + + This field is a member of `oneof`_ ``_code``. + message (str): + [Output Only] Error message. + + This field is a member of `oneof`_ ``_message``. + """ + + code = proto.Field( + proto.STRING, + number=3059181, + optional=True, + ) + message = proto.Field( + proto.STRING, + number=418054151, + optional=True, + ) + + +class InstanceMoveRequest(proto.Message): + r""" + + Attributes: + destination_zone (str): + The URL of the destination zone to move the + instance. This can be a full or partial URL. For + example, the following are all valid URLs to a + zone: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + - projects/project/zones/zone - zones/zone + + This field is a member of `oneof`_ ``_destination_zone``. + target_instance (str): + The URL of the target instance to move. This + can be a full or partial URL. For example, the + following are all valid URLs to an instance: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /instances/instance - + projects/project/zones/zone/instances/instance - + zones/zone/instances/instance + + This field is a member of `oneof`_ ``_target_instance``. 
+ """ + + destination_zone = proto.Field( + proto.STRING, + number=131854653, + optional=True, + ) + target_instance = proto.Field( + proto.STRING, + number=289769347, + optional=True, + ) + + +class InstanceProperties(proto.Message): + r""" + + Attributes: + advanced_machine_features (google.cloud.compute_v1.types.AdvancedMachineFeatures): + Controls for advanced machine-related + behavior features. + + This field is a member of `oneof`_ ``_advanced_machine_features``. + can_ip_forward (bool): + Enables instances created based on these + properties to send packets with source IP + addresses other than their own and receive + packets with destination IP addresses other than + their own. If these instances will be used as an + IP gateway or it will be set as the next-hop in + a Route resource, specify true. If unsure, leave + this set to false. See the Enable IP forwarding + documentation for more information. + + This field is a member of `oneof`_ ``_can_ip_forward``. + confidential_instance_config (google.cloud.compute_v1.types.ConfidentialInstanceConfig): + Specifies the Confidential Instance options. + + This field is a member of `oneof`_ ``_confidential_instance_config``. + description (str): + An optional text description for the + instances that are created from these + properties. + + This field is a member of `oneof`_ ``_description``. + disks (Sequence[google.cloud.compute_v1.types.AttachedDisk]): + An array of disks that are associated with + the instances that are created from these + properties. + guest_accelerators (Sequence[google.cloud.compute_v1.types.AcceleratorConfig]): + A list of guest accelerator cards' type and + count to use for instances created from these + properties. + labels (Sequence[google.cloud.compute_v1.types.InstanceProperties.LabelsEntry]): + Labels to apply to instances that are created + from these properties. + machine_type (str): + The machine type to use for instances that + are created from these properties. 
+ + This field is a member of `oneof`_ ``_machine_type``. + metadata (google.cloud.compute_v1.types.Metadata): + The metadata key/value pairs to assign to + instances that are created from these + properties. These pairs can consist of custom + metadata or predefined keys. See Project and + instance metadata for more information. + + This field is a member of `oneof`_ ``_metadata``. + min_cpu_platform (str): + Minimum cpu/platform to be used by instances. + The instance may be scheduled on the specified + or newer cpu/platform. Applicable values are the + friendly names of CPU platforms, such as + minCpuPlatform: "Intel Haswell" or + minCpuPlatform: "Intel Sandy Bridge". For more + information, read Specifying a Minimum CPU + Platform. + + This field is a member of `oneof`_ ``_min_cpu_platform``. + network_interfaces (Sequence[google.cloud.compute_v1.types.NetworkInterface]): + An array of network access configurations for + this interface. + private_ipv6_google_access (str): + The private IPv6 google access type for VMs. If not + specified, use INHERIT_FROM_SUBNETWORK as default. Check the + PrivateIpv6GoogleAccess enum for the list of possible + values. + + This field is a member of `oneof`_ ``_private_ipv6_google_access``. + reservation_affinity (google.cloud.compute_v1.types.ReservationAffinity): + Specifies the reservations that instances can + consume from. + + This field is a member of `oneof`_ ``_reservation_affinity``. + resource_policies (Sequence[str]): + Resource policies (names, not ULRs) applied + to instances created from these properties. + scheduling (google.cloud.compute_v1.types.Scheduling): + Specifies the scheduling options for the + instances that are created from these + properties. + + This field is a member of `oneof`_ ``_scheduling``. + service_accounts (Sequence[google.cloud.compute_v1.types.ServiceAccount]): + A list of service accounts with specified + scopes. 
Access tokens for these service accounts + are available to the instances that are created + from these properties. Use metadata queries to + obtain the access tokens for these instances. + shielded_instance_config (google.cloud.compute_v1.types.ShieldedInstanceConfig): + + This field is a member of `oneof`_ ``_shielded_instance_config``. + tags (google.cloud.compute_v1.types.Tags): + A list of tags to apply to the instances that + are created from these properties. The tags + identify valid sources or targets for network + firewalls. The setTags method can modify this + list of tags. Each tag within the list must + comply with RFC1035. + + This field is a member of `oneof`_ ``_tags``. + """ + class PrivateIpv6GoogleAccess(proto.Enum): + r"""The private IPv6 google access type for VMs. If not specified, use + INHERIT_FROM_SUBNETWORK as default. + """ + UNDEFINED_PRIVATE_IPV6_GOOGLE_ACCESS = 0 + ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE = 427975994 + ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE = 288210263 + INHERIT_FROM_SUBNETWORK = 530256959 + + advanced_machine_features = proto.Field( + proto.MESSAGE, + number=409646002, + optional=True, + message='AdvancedMachineFeatures', + ) + can_ip_forward = proto.Field( + proto.BOOL, + number=467731324, + optional=True, + ) + confidential_instance_config = proto.Field( + proto.MESSAGE, + number=490637685, + optional=True, + message='ConfidentialInstanceConfig', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disks = proto.RepeatedField( + proto.MESSAGE, + number=95594102, + message='AttachedDisk', + ) + guest_accelerators = proto.RepeatedField( + proto.MESSAGE, + number=463595119, + message='AcceleratorConfig', + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + machine_type = proto.Field( + proto.STRING, + number=227711026, + optional=True, + ) + metadata = proto.Field( + proto.MESSAGE, + number=86866735, + optional=True, + message='Metadata', + ) + 
min_cpu_platform = proto.Field( + proto.STRING, + number=242912759, + optional=True, + ) + network_interfaces = proto.RepeatedField( + proto.MESSAGE, + number=52735243, + message='NetworkInterface', + ) + private_ipv6_google_access = proto.Field( + proto.STRING, + number=48277006, + optional=True, + ) + reservation_affinity = proto.Field( + proto.MESSAGE, + number=157850683, + optional=True, + message='ReservationAffinity', + ) + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + scheduling = proto.Field( + proto.MESSAGE, + number=386688404, + optional=True, + message='Scheduling', + ) + service_accounts = proto.RepeatedField( + proto.MESSAGE, + number=277537328, + message='ServiceAccount', + ) + shielded_instance_config = proto.Field( + proto.MESSAGE, + number=12862901, + optional=True, + message='ShieldedInstanceConfig', + ) + tags = proto.Field( + proto.MESSAGE, + number=3552281, + optional=True, + message='Tags', + ) + + +class InstanceReference(proto.Message): + r""" + + Attributes: + instance (str): + The URL for a specific instance. @required + compute.instancegroups.addInstances/removeInstances + + This field is a member of `oneof`_ ``_instance``. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + optional=True, + ) + + +class InstanceTemplate(proto.Message): + r"""Represents an Instance Template resource. You can use + instance templates to create VM instances and managed instance + groups. For more information, read Instance Templates. + + Attributes: + creation_timestamp (str): + [Output Only] The creation timestamp for this instance + template in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] A unique identifier for this instance + template. 
The server defines this identifier. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] The resource type, which is always + compute#instanceTemplate for instance templates. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource; provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + properties (google.cloud.compute_v1.types.InstanceProperties): + The instance properties for this instance + template. + + This field is a member of `oneof`_ ``_properties``. + self_link (str): + [Output Only] The URL for this instance template. The server + defines this URL. + + This field is a member of `oneof`_ ``_self_link``. + source_instance (str): + The source instance used to create the + template. You can provide this as a partial or + full URL to the resource. For example, the + following are valid values: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /instances/instance - + projects/project/zones/zone/instances/instance + + This field is a member of `oneof`_ ``_source_instance``. + source_instance_params (google.cloud.compute_v1.types.SourceInstanceParams): + The source instance params to use to create + this instance template. + + This field is a member of `oneof`_ ``_source_instance_params``. 
+ """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + properties = proto.Field( + proto.MESSAGE, + number=147688755, + optional=True, + message='InstanceProperties', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + source_instance = proto.Field( + proto.STRING, + number=396315705, + optional=True, + ) + source_instance_params = proto.Field( + proto.MESSAGE, + number=135342156, + optional=True, + message='SourceInstanceParams', + ) + + +class InstanceTemplateList(proto.Message): + r"""A list of instance templates. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InstanceTemplate]): + A list of InstanceTemplate resources. + kind (str): + [Output Only] The resource type, which is always + compute#instanceTemplatesListResponse for instance template + lists. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceTemplate', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstanceWithNamedPorts(proto.Message): + r""" + + Attributes: + instance (str): + [Output Only] The URL of the instance. + + This field is a member of `oneof`_ ``_instance``. + named_ports (Sequence[google.cloud.compute_v1.types.NamedPort]): + [Output Only] The named ports that belong to this instance + group. + status (str): + [Output Only] The status of the instance. Check the Status + enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. 
+ """ + class Status(proto.Enum): + r"""[Output Only] The status of the instance.""" + UNDEFINED_STATUS = 0 + DEPROVISIONING = 428935662 + PROVISIONING = 290896621 + REPAIRING = 413483285 + RUNNING = 121282975 + STAGING = 431072283 + STOPPED = 444276141 + STOPPING = 350791796 + SUSPENDED = 51223995 + SUSPENDING = 514206246 + TERMINATED = 250018339 + + instance = proto.Field( + proto.STRING, + number=18257045, + optional=True, + ) + named_ports = proto.RepeatedField( + proto.MESSAGE, + number=427598732, + message='NamedPort', + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class InstancesAddResourcePoliciesRequest(proto.Message): + r""" + + Attributes: + resource_policies (Sequence[str]): + Resource policies to be added to this + instance. + """ + + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + + +class InstancesGetEffectiveFirewallsResponse(proto.Message): + r""" + + Attributes: + firewall_policys (Sequence[google.cloud.compute_v1.types.InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy]): + Effective firewalls from firewall policies. + firewalls (Sequence[google.cloud.compute_v1.types.Firewall]): + Effective firewalls on the instance. + """ + + firewall_policys = proto.RepeatedField( + proto.MESSAGE, + number=410985794, + message='InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + ) + firewalls = proto.RepeatedField( + proto.MESSAGE, + number=272245619, + message='Firewall', + ) + + +class InstancesGetEffectiveFirewallsResponseEffectiveFirewallPolicy(proto.Message): + r""" + + Attributes: + display_name (str): + [Output Only] Deprecated, please use short name instead. The + display name of the firewall policy. + + This field is a member of `oneof`_ ``_display_name``. + name (str): + [Output Only] The name of the firewall policy. + + This field is a member of `oneof`_ ``_name``. 
+ rules (Sequence[google.cloud.compute_v1.types.FirewallPolicyRule]): + The rules that apply to the network. + short_name (str): + [Output Only] The short name of the firewall policy. + + This field is a member of `oneof`_ ``_short_name``. + type_ (str): + [Output Only] The type of the firewall policy. Check the + Type enum for the list of possible values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""[Output Only] The type of the firewall policy.""" + UNDEFINED_TYPE = 0 + HIERARCHY = 69902869 + UNSPECIFIED = 526786327 + + display_name = proto.Field( + proto.STRING, + number=4473832, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + rules = proto.RepeatedField( + proto.MESSAGE, + number=108873975, + message='FirewallPolicyRule', + ) + short_name = proto.Field( + proto.STRING, + number=492051566, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class InstancesRemoveResourcePoliciesRequest(proto.Message): + r""" + + Attributes: + resource_policies (Sequence[str]): + Resource policies to be removed from this + instance. + """ + + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + + +class InstancesScopedList(proto.Message): + r""" + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.Instance]): + [Output Only] A list of instances contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of instances when the list is empty. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='Instance', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InstancesSetLabelsRequest(proto.Message): + r""" + + Attributes: + label_fingerprint (str): + Fingerprint of the previous set of labels for + this resource, used to prevent conflicts. + Provide the latest fingerprint value when making + a request to add or change labels. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.InstancesSetLabelsRequest.LabelsEntry]): + + """ + + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + + +class InstancesSetMachineResourcesRequest(proto.Message): + r""" + + Attributes: + guest_accelerators (Sequence[google.cloud.compute_v1.types.AcceleratorConfig]): + A list of the type and count of accelerator + cards attached to the instance. + """ + + guest_accelerators = proto.RepeatedField( + proto.MESSAGE, + number=463595119, + message='AcceleratorConfig', + ) + + +class InstancesSetMachineTypeRequest(proto.Message): + r""" + + Attributes: + machine_type (str): + Full or partial URL of the machine type + resource. See Machine Types for a full list of + machine types. For example: zones/us- + central1-f/machineTypes/n1-standard-1 + + This field is a member of `oneof`_ ``_machine_type``. + """ + + machine_type = proto.Field( + proto.STRING, + number=227711026, + optional=True, + ) + + +class InstancesSetMinCpuPlatformRequest(proto.Message): + r""" + + Attributes: + min_cpu_platform (str): + Minimum cpu/platform this instance should be + started at. + + This field is a member of `oneof`_ ``_min_cpu_platform``. 
+ """ + + min_cpu_platform = proto.Field( + proto.STRING, + number=242912759, + optional=True, + ) + + +class InstancesSetServiceAccountRequest(proto.Message): + r""" + + Attributes: + email (str): + Email address of the service account. + + This field is a member of `oneof`_ ``_email``. + scopes (Sequence[str]): + The list of scopes to be made available for + this service account. + """ + + email = proto.Field( + proto.STRING, + number=96619420, + optional=True, + ) + scopes = proto.RepeatedField( + proto.STRING, + number=165973151, + ) + + +class InstancesStartWithEncryptionKeyRequest(proto.Message): + r""" + + Attributes: + disks (Sequence[google.cloud.compute_v1.types.CustomerEncryptionKeyProtectedDisk]): + Array of disks associated with this instance + that are protected with a customer-supplied + encryption key. In order to start the instance, + the disk url and its corresponding key must be + provided. If the disk is not protected with a + customer-supplied encryption key it should not + be specified. + """ + + disks = proto.RepeatedField( + proto.MESSAGE, + number=95594102, + message='CustomerEncryptionKeyProtectedDisk', + ) + + +class Int64RangeMatch(proto.Message): + r"""HttpRouteRuleMatch criteria for field values that must stay + within the specified integer range. + + Attributes: + range_end (int): + The end of the range (exclusive) in signed + long integer format. + + This field is a member of `oneof`_ ``_range_end``. + range_start (int): + The start of the range (inclusive) in signed + long integer format. + + This field is a member of `oneof`_ ``_range_start``. + """ + + range_end = proto.Field( + proto.INT64, + number=322439897, + optional=True, + ) + range_start = proto.Field( + proto.INT64, + number=103333600, + optional=True, + ) + + +class Interconnect(proto.Message): + r"""Represents an Interconnect resource. An Interconnect resource + is a dedicated connection between the GCP network and your on- + premises network. 
For more information, read the Dedicated + Interconnect Overview. + + Attributes: + admin_enabled (bool): + Administrative status of the interconnect. + When this is set to true, the Interconnect is + functional and can carry traffic. When set to + false, no packets can be carried over the + interconnect and no BGP routes are exchanged + over it. By default, the status is set to true. + + This field is a member of `oneof`_ ``_admin_enabled``. + circuit_infos (Sequence[google.cloud.compute_v1.types.InterconnectCircuitInfo]): + [Output Only] A list of CircuitInfo objects, that describe + the individual circuits in this LAG. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + customer_name (str): + Customer name, to put in the Letter of + Authorization as the party authorized to request + a crossconnect. + + This field is a member of `oneof`_ ``_customer_name``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + expected_outages (Sequence[google.cloud.compute_v1.types.InterconnectOutageNotification]): + [Output Only] A list of outages expected for this + Interconnect. + google_ip_address (str): + [Output Only] IP address configured on the Google side of + the Interconnect link. This can be used only for ping tests. + + This field is a member of `oneof`_ ``_google_ip_address``. + google_reference_id (str): + [Output Only] Google reference ID to be used when raising + support tickets with Google or otherwise to debug backend + connectivity issues. + + This field is a member of `oneof`_ ``_google_reference_id``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. 
+ interconnect_attachments (Sequence[str]): + [Output Only] A list of the URLs of all + InterconnectAttachments configured to use this Interconnect. + interconnect_type (str): + Type of interconnect, which can take one of the following + values: - PARTNER: A partner-managed interconnection shared + between customers though a partner. - DEDICATED: A dedicated + physical interconnection with the customer. Note that a + value IT_PRIVATE has been deprecated in favor of DEDICATED. + Check the InterconnectType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_interconnect_type``. + kind (str): + [Output Only] Type of the resource. Always + compute#interconnect for interconnects. + + This field is a member of `oneof`_ ``_kind``. + link_type (str): + Type of link requested, which can take one of the following + values: - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR + optics - LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR + optics. Note that this field indicates the speed of each of + the links in the bundle, not the speed of the entire bundle. + Check the LinkType enum for the list of possible values. + + This field is a member of `oneof`_ ``_link_type``. + location (str): + URL of the InterconnectLocation object that + represents where this connection is to be + provisioned. + + This field is a member of `oneof`_ ``_location``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. 
+ noc_contact_email (str): + Email address to contact the customer NOC for + operations and maintenance notifications + regarding this Interconnect. If specified, this + will be used for notifications in addition to + all other forms described, such as Stackdriver + logs alerting and Cloud Notifications. + + This field is a member of `oneof`_ ``_noc_contact_email``. + operational_status (str): + [Output Only] The current status of this Interconnect's + functionality, which can take one of the following values: - + OS_ACTIVE: A valid Interconnect, which is turned up and is + ready to use. Attachments may be provisioned on this + Interconnect. - OS_UNPROVISIONED: An Interconnect that has + not completed turnup. No attachments may be provisioned on + this Interconnect. - OS_UNDER_MAINTENANCE: An Interconnect + that is undergoing internal maintenance. No attachments may + be provisioned or updated on this Interconnect. Check the + OperationalStatus enum for the list of possible values. + + This field is a member of `oneof`_ ``_operational_status``. + peer_ip_address (str): + [Output Only] IP address configured on the customer side of + the Interconnect link. The customer should configure this IP + address during turnup when prompted by Google NOC. This can + be used only for ping tests. + + This field is a member of `oneof`_ ``_peer_ip_address``. + provisioned_link_count (int): + [Output Only] Number of links actually provisioned in this + interconnect. + + This field is a member of `oneof`_ ``_provisioned_link_count``. + requested_link_count (int): + Target number of physical links in the link + bundle, as requested by the customer. + + This field is a member of `oneof`_ ``_requested_link_count``. + satisfies_pzs (bool): + [Output Only] Set to true if the resource satisfies the zone + separation organization policy constraints and false + otherwise. Defaults to false if the field is not present. + + This field is a member of `oneof`_ ``_satisfies_pzs``. 
+ self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + state (str): + [Output Only] The current state of Interconnect + functionality, which can take one of the following values: - + ACTIVE: The Interconnect is valid, turned up and ready to + use. Attachments may be provisioned on this Interconnect. - + UNPROVISIONED: The Interconnect has not completed turnup. No + attachments may be provisioned on this Interconnect. - + UNDER_MAINTENANCE: The Interconnect is undergoing internal + maintenance. No attachments may be provisioned or updated on + this Interconnect. Check the State enum for the list of + possible values. + + This field is a member of `oneof`_ ``_state``. + """ + class InterconnectType(proto.Enum): + r"""Type of interconnect, which can take one of the following values: - + PARTNER: A partner-managed interconnection shared between customers + though a partner. - DEDICATED: A dedicated physical interconnection + with the customer. Note that a value IT_PRIVATE has been deprecated + in favor of DEDICATED. + """ + UNDEFINED_INTERCONNECT_TYPE = 0 + DEDICATED = 258411983 + IT_PRIVATE = 335677007 + PARTNER = 461924520 + + class LinkType(proto.Enum): + r"""Type of link requested, which can take one of the following values: + - LINK_TYPE_ETHERNET_10G_LR: A 10G Ethernet with LR optics - + LINK_TYPE_ETHERNET_100G_LR: A 100G Ethernet with LR optics. Note + that this field indicates the speed of each of the links in the + bundle, not the speed of the entire bundle. + """ + UNDEFINED_LINK_TYPE = 0 + LINK_TYPE_ETHERNET_100G_LR = 337672551 + LINK_TYPE_ETHERNET_10G_LR = 236739749 + + class OperationalStatus(proto.Enum): + r"""[Output Only] The current status of this Interconnect's + functionality, which can take one of the following values: - + OS_ACTIVE: A valid Interconnect, which is turned up and is ready to + use. Attachments may be provisioned on this Interconnect. 
- + OS_UNPROVISIONED: An Interconnect that has not completed turnup. No + attachments may be provisioned on this Interconnect. - + OS_UNDER_MAINTENANCE: An Interconnect that is undergoing internal + maintenance. No attachments may be provisioned or updated on this + Interconnect. + """ + UNDEFINED_OPERATIONAL_STATUS = 0 + OS_ACTIVE = 55721409 + OS_UNPROVISIONED = 239771840 + + class State(proto.Enum): + r"""[Output Only] The current state of Interconnect functionality, which + can take one of the following values: - ACTIVE: The Interconnect is + valid, turned up and ready to use. Attachments may be provisioned on + this Interconnect. - UNPROVISIONED: The Interconnect has not + completed turnup. No attachments may be provisioned on this + Interconnect. - UNDER_MAINTENANCE: The Interconnect is undergoing + internal maintenance. No attachments may be provisioned or updated + on this Interconnect. + """ + UNDEFINED_STATE = 0 + ACTIVE = 314733318 + UNPROVISIONED = 517333979 + + admin_enabled = proto.Field( + proto.BOOL, + number=445675089, + optional=True, + ) + circuit_infos = proto.RepeatedField( + proto.MESSAGE, + number=164839855, + message='InterconnectCircuitInfo', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + customer_name = proto.Field( + proto.STRING, + number=3665484, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + expected_outages = proto.RepeatedField( + proto.MESSAGE, + number=264484123, + message='InterconnectOutageNotification', + ) + google_ip_address = proto.Field( + proto.STRING, + number=443105954, + optional=True, + ) + google_reference_id = proto.Field( + proto.STRING, + number=534944469, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + interconnect_attachments = proto.RepeatedField( + proto.STRING, + number=425388415, + ) + interconnect_type = proto.Field( + proto.STRING, + number=515165259, 
+ optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + link_type = proto.Field( + proto.STRING, + number=523207775, + optional=True, + ) + location = proto.Field( + proto.STRING, + number=290430901, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + noc_contact_email = proto.Field( + proto.STRING, + number=14072832, + optional=True, + ) + operational_status = proto.Field( + proto.STRING, + number=201070847, + optional=True, + ) + peer_ip_address = proto.Field( + proto.STRING, + number=207735769, + optional=True, + ) + provisioned_link_count = proto.Field( + proto.INT32, + number=410888565, + optional=True, + ) + requested_link_count = proto.Field( + proto.INT32, + number=45051387, + optional=True, + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + + +class InterconnectAttachment(proto.Message): + r"""Represents an Interconnect Attachment (VLAN) resource. You + can use Interconnect attachments (VLANS) to connect your Virtual + Private Cloud networks to your on-premises networks through an + Interconnect. For more information, read Creating VLAN + Attachments. + + Attributes: + admin_enabled (bool): + Determines whether this Attachment will carry packets. Not + present for PARTNER_PROVIDER. + + This field is a member of `oneof`_ ``_admin_enabled``. + bandwidth (str): + Provisioned bandwidth capacity for the interconnect + attachment. For attachments of type DEDICATED, the user can + set the bandwidth. For attachments of type PARTNER, the + Google Partner that is operating the interconnect must set + the bandwidth. 
Output only for PARTNER type, mutable for + PARTNER_PROVIDER and DEDICATED, and can take one of the + following values: - BPS_50M: 50 Mbit/s - BPS_100M: 100 + Mbit/s - BPS_200M: 200 Mbit/s - BPS_300M: 300 Mbit/s - + BPS_400M: 400 Mbit/s - BPS_500M: 500 Mbit/s - BPS_1G: 1 + Gbit/s - BPS_2G: 2 Gbit/s - BPS_5G: 5 Gbit/s - BPS_10G: 10 + Gbit/s - BPS_20G: 20 Gbit/s - BPS_50G: 50 Gbit/s Check the + Bandwidth enum for the list of possible values. + + This field is a member of `oneof`_ ``_bandwidth``. + candidate_subnets (Sequence[str]): + Up to 16 candidate prefixes that can be used + to restrict the allocation of + cloudRouterIpAddress and customerRouterIpAddress + for this attachment. All prefixes must be within + link-local address space (169.254.0.0/16) and + must be /29 or shorter (/28, /27, etc). Google + will attempt to select an unused /29 from the + supplied candidate prefix(es). The request will + fail if all possible /29s are in use on Google's + edge. If not supplied, Google will randomly + select an unused /29 from all of link-local + space. + cloud_router_ip_address (str): + [Output Only] IPv4 address + prefix length to be configured + on Cloud Router Interface for this interconnect attachment. + + This field is a member of `oneof`_ ``_cloud_router_ip_address``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + customer_router_ip_address (str): + [Output Only] IPv4 address + prefix length to be configured + on the customer router subinterface for this interconnect + attachment. + + This field is a member of `oneof`_ ``_customer_router_ip_address``. + dataplane_version (int): + [Output Only] Dataplane version for this + InterconnectAttachment. + + This field is a member of `oneof`_ ``_dataplane_version``. + description (str): + An optional description of this resource. + + This field is a member of `oneof`_ ``_description``. 
+ edge_availability_domain (str): + Desired availability domain for the attachment. Only + available for type PARTNER, at creation time, and can take + one of the following values: - AVAILABILITY_DOMAIN_ANY - + AVAILABILITY_DOMAIN_1 - AVAILABILITY_DOMAIN_2 For improved + reliability, customers should configure a pair of + attachments, one per availability domain. The selected + availability domain will be provided to the Partner via the + pairing key, so that the provisioned circuit will lie in the + specified domain. If not specified, the value will default + to AVAILABILITY_DOMAIN_ANY. Check the EdgeAvailabilityDomain + enum for the list of possible values. + + This field is a member of `oneof`_ ``_edge_availability_domain``. + encryption (str): + Indicates the user-supplied encryption option of this VLAN + attachment (interconnectAttachment). Can only be specified + at attachment creation for PARTNER or DEDICATED attachments. + Possible values are: - NONE - This is the default value, + which means that the VLAN attachment carries unencrypted + traffic. VMs are able to send traffic to, or receive traffic + from, such a VLAN attachment. - IPSEC - The VLAN attachment + carries only encrypted traffic that is encrypted by an IPsec + device, such as an HA VPN gateway or third-party IPsec VPN. + VMs cannot directly send traffic to, or receive traffic + from, such a VLAN attachment. To use *IPsec-encrypted Cloud + Interconnect*, the VLAN attachment must be created with this + option. Not currently available publicly. Check the + Encryption enum for the list of possible values. + + This field is a member of `oneof`_ ``_encryption``. + google_reference_id (str): + [Output Only] Google reference ID, to be used when raising + support tickets with Google or otherwise to debug backend + connectivity issues. [Deprecated] This field is not used. + + This field is a member of `oneof`_ ``_google_reference_id``. + id (int): + [Output Only] The unique identifier for the resource. 
This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + interconnect (str): + URL of the underlying Interconnect object + that this attachment's traffic will traverse + through. + + This field is a member of `oneof`_ ``_interconnect``. + ipsec_internal_addresses (Sequence[str]): + A list of URLs of addresses that have been + reserved for the VLAN attachment. Used only for + the VLAN attachment that has the encryption + option as IPSEC. The addresses must be regional + internal IP address ranges. When creating an HA + VPN gateway over the VLAN attachment, if the + attachment is configured to use a regional + internal IP address, then the VPN gateway's IP + address is allocated from the IP address range + specified here. For example, if the HA VPN + gateway's interface 0 is paired to this VLAN + attachment, then a regional internal IP address + for the VPN gateway interface 0 will be + allocated from the IP address specified for this + VLAN attachment. If this field is not specified + when creating the VLAN attachment, then later on + when creating an HA VPN gateway on this VLAN + attachment, the HA VPN gateway's IP address is + allocated from the regional external IP address + pool. Not currently available publicly. + kind (str): + [Output Only] Type of the resource. Always + compute#interconnectAttachment for interconnect attachments. + + This field is a member of `oneof`_ ``_kind``. + mtu (int): + Maximum Transmission Unit (MTU), in bytes, of + packets passing through this interconnect + attachment. Only 1440 and 1500 are allowed. If + not specified, the value will default to 1440. + + This field is a member of `oneof`_ ``_mtu``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. 
Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + operational_status (str): + [Output Only] The current status of whether or not this + interconnect attachment is functional, which can take one of + the following values: - OS_ACTIVE: The attachment has been + turned up and is ready to use. - OS_UNPROVISIONED: The + attachment is not ready to use yet, because turnup is not + complete. Check the OperationalStatus enum for the list of + possible values. + + This field is a member of `oneof`_ ``_operational_status``. + pairing_key (str): + [Output only for type PARTNER. Input only for + PARTNER_PROVIDER. Not present for DEDICATED]. The opaque + identifier of an PARTNER attachment used to initiate + provisioning with a selected partner. Of the form + "XXXXX/region/domain". + + This field is a member of `oneof`_ ``_pairing_key``. + partner_asn (int): + Optional BGP ASN for the router supplied by a Layer 3 + Partner if they configured BGP on behalf of the customer. + Output only for PARTNER type, input only for + PARTNER_PROVIDER, not available for DEDICATED. + + This field is a member of `oneof`_ ``_partner_asn``. + partner_metadata (google.cloud.compute_v1.types.InterconnectAttachmentPartnerMetadata): + Informational metadata about Partner attachments from + Partners to display to customers. Output only for for + PARTNER type, mutable for PARTNER_PROVIDER, not available + for DEDICATED. + + This field is a member of `oneof`_ ``_partner_metadata``. + private_interconnect_info (google.cloud.compute_v1.types.InterconnectAttachmentPrivateInfo): + [Output Only] Information specific to an + InterconnectAttachment. 
This property is populated if the + interconnect that this is attached to is of type DEDICATED. + + This field is a member of `oneof`_ ``_private_interconnect_info``. + region (str): + [Output Only] URL of the region where the regional + interconnect attachment resides. You must specify this field + as part of the HTTP request URL. It is not settable as a + field in the request body. + + This field is a member of `oneof`_ ``_region``. + router (str): + URL of the Cloud Router to be used for + dynamic routing. This router must be in the same + region as this InterconnectAttachment. The + InterconnectAttachment will automatically + connect the Interconnect to the network & region + within which the Cloud Router is configured. + + This field is a member of `oneof`_ ``_router``. + satisfies_pzs (bool): + [Output Only] Set to true if the resource satisfies the zone + separation organization policy constraints and false + otherwise. Defaults to false if the field is not present. + + This field is a member of `oneof`_ ``_satisfies_pzs``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + state (str): + [Output Only] The current state of this attachment's + functionality. Enum values ACTIVE and UNPROVISIONED are + shared by DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER + interconnect attachments, while enum values PENDING_PARTNER, + PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for + only PARTNER and PARTNER_PROVIDER interconnect attachments. + This state can take one of the following values: - ACTIVE: + The attachment has been turned up and is ready to use. - + UNPROVISIONED: The attachment is not ready to use yet, + because turnup is not complete. - PENDING_PARTNER: A + newly-created PARTNER attachment that has not yet been + configured on the Partner side. 
- PARTNER_REQUEST_RECEIVED: + A PARTNER attachment is in the process of provisioning after + a PARTNER_PROVIDER attachment was created that references + it. - PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER + attachment that is waiting for a customer to activate it. - + DEFUNCT: The attachment was deleted externally and is no + longer functional. This could be because the associated + Interconnect was removed, or because the other side of a + Partner attachment was deleted. Check the State enum for the + list of possible values. + + This field is a member of `oneof`_ ``_state``. + type_ (str): + The type of interconnect attachment this is, which can take + one of the following values: - DEDICATED: an attachment to a + Dedicated Interconnect. - PARTNER: an attachment to a + Partner Interconnect, created by the customer. - + PARTNER_PROVIDER: an attachment to a Partner Interconnect, + created by the partner. Check the Type enum for the list of + possible values. + + This field is a member of `oneof`_ ``_type``. + vlan_tag8021q (int): + The IEEE 802.1Q VLAN tag for this attachment, + in the range 2-4094. Only specified at creation + time. + + This field is a member of `oneof`_ ``_vlan_tag8021q``. + """ + class Bandwidth(proto.Enum): + r"""Provisioned bandwidth capacity for the interconnect attachment. For + attachments of type DEDICATED, the user can set the bandwidth. For + attachments of type PARTNER, the Google Partner that is operating + the interconnect must set the bandwidth. 
Output only for PARTNER + type, mutable for PARTNER_PROVIDER and DEDICATED, and can take one + of the following values: - BPS_50M: 50 Mbit/s - BPS_100M: 100 Mbit/s + - BPS_200M: 200 Mbit/s - BPS_300M: 300 Mbit/s - BPS_400M: 400 Mbit/s + - BPS_500M: 500 Mbit/s - BPS_1G: 1 Gbit/s - BPS_2G: 2 Gbit/s - + BPS_5G: 5 Gbit/s - BPS_10G: 10 Gbit/s - BPS_20G: 20 Gbit/s - + BPS_50G: 50 Gbit/s + """ + UNDEFINED_BANDWIDTH = 0 + BPS_100M = 49547958 + BPS_10G = 278693006 + BPS_1G = 355358448 + BPS_200M = 49577749 + BPS_20G = 278693967 + BPS_2G = 355358479 + BPS_300M = 49607540 + BPS_400M = 49637331 + BPS_500M = 49667122 + BPS_50G = 278696850 + BPS_50M = 278696856 + BPS_5G = 355358572 + + class EdgeAvailabilityDomain(proto.Enum): + r"""Desired availability domain for the attachment. Only available for + type PARTNER, at creation time, and can take one of the following + values: - AVAILABILITY_DOMAIN_ANY - AVAILABILITY_DOMAIN_1 - + AVAILABILITY_DOMAIN_2 For improved reliability, customers should + configure a pair of attachments, one per availability domain. The + selected availability domain will be provided to the Partner via the + pairing key, so that the provisioned circuit will lie in the + specified domain. If not specified, the value will default to + AVAILABILITY_DOMAIN_ANY. + """ + UNDEFINED_EDGE_AVAILABILITY_DOMAIN = 0 + AVAILABILITY_DOMAIN_1 = 349552090 + AVAILABILITY_DOMAIN_2 = 349552091 + AVAILABILITY_DOMAIN_ANY = 375256373 + + class Encryption(proto.Enum): + r"""Indicates the user-supplied encryption option of this VLAN + attachment (interconnectAttachment). Can only be specified at + attachment creation for PARTNER or DEDICATED attachments. Possible + values are: - NONE - This is the default value, which means that the + VLAN attachment carries unencrypted traffic. VMs are able to send + traffic to, or receive traffic from, such a VLAN attachment. 
- IPSEC + - The VLAN attachment carries only encrypted traffic that is + encrypted by an IPsec device, such as an HA VPN gateway or + third-party IPsec VPN. VMs cannot directly send traffic to, or + receive traffic from, such a VLAN attachment. To use + *IPsec-encrypted Cloud Interconnect*, the VLAN attachment must be + created with this option. Not currently available publicly. + """ + UNDEFINED_ENCRYPTION = 0 + IPSEC = 69882282 + NONE = 2402104 + + class OperationalStatus(proto.Enum): + r"""[Output Only] The current status of whether or not this interconnect + attachment is functional, which can take one of the following + values: - OS_ACTIVE: The attachment has been turned up and is ready + to use. - OS_UNPROVISIONED: The attachment is not ready to use yet, + because turnup is not complete. + """ + UNDEFINED_OPERATIONAL_STATUS = 0 + OS_ACTIVE = 55721409 + OS_UNPROVISIONED = 239771840 + + class State(proto.Enum): + r"""[Output Only] The current state of this attachment's functionality. + Enum values ACTIVE and UNPROVISIONED are shared by + DEDICATED/PRIVATE, PARTNER, and PARTNER_PROVIDER interconnect + attachments, while enum values PENDING_PARTNER, + PARTNER_REQUEST_RECEIVED, and PENDING_CUSTOMER are used for only + PARTNER and PARTNER_PROVIDER interconnect attachments. This state + can take one of the following values: - ACTIVE: The attachment has + been turned up and is ready to use. - UNPROVISIONED: The attachment + is not ready to use yet, because turnup is not complete. - + PENDING_PARTNER: A newly-created PARTNER attachment that has not yet + been configured on the Partner side. - PARTNER_REQUEST_RECEIVED: A + PARTNER attachment is in the process of provisioning after a + PARTNER_PROVIDER attachment was created that references it. - + PENDING_CUSTOMER: A PARTNER or PARTNER_PROVIDER attachment that is + waiting for a customer to activate it. - DEFUNCT: The attachment was + deleted externally and is no longer functional. 
This could be + because the associated Interconnect was removed, or because the + other side of a Partner attachment was deleted. + """ + UNDEFINED_STATE = 0 + ACTIVE = 314733318 + DEFUNCT = 115891759 + PARTNER_REQUEST_RECEIVED = 513587304 + PENDING_CUSTOMER = 167494054 + PENDING_PARTNER = 387890656 + STATE_UNSPECIFIED = 470755401 + UNPROVISIONED = 517333979 + + class Type(proto.Enum): + r"""The type of interconnect attachment this is, which can take one of + the following values: - DEDICATED: an attachment to a Dedicated + Interconnect. - PARTNER: an attachment to a Partner Interconnect, + created by the customer. - PARTNER_PROVIDER: an attachment to a + Partner Interconnect, created by the partner. + """ + UNDEFINED_TYPE = 0 + DEDICATED = 258411983 + PARTNER = 461924520 + PARTNER_PROVIDER = 483261352 + + admin_enabled = proto.Field( + proto.BOOL, + number=445675089, + optional=True, + ) + bandwidth = proto.Field( + proto.STRING, + number=181715121, + optional=True, + ) + candidate_subnets = proto.RepeatedField( + proto.STRING, + number=237842938, + ) + cloud_router_ip_address = proto.Field( + proto.STRING, + number=287392776, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + customer_router_ip_address = proto.Field( + proto.STRING, + number=332475761, + optional=True, + ) + dataplane_version = proto.Field( + proto.INT32, + number=34920075, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + edge_availability_domain = proto.Field( + proto.STRING, + number=71289510, + optional=True, + ) + encryption = proto.Field( + proto.STRING, + number=97980291, + optional=True, + ) + google_reference_id = proto.Field( + proto.STRING, + number=534944469, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + interconnect = proto.Field( + proto.STRING, + number=224601230, + optional=True, + ) + ipsec_internal_addresses = 
proto.RepeatedField( + proto.STRING, + number=407648565, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + mtu = proto.Field( + proto.INT32, + number=108462, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + operational_status = proto.Field( + proto.STRING, + number=201070847, + optional=True, + ) + pairing_key = proto.Field( + proto.STRING, + number=439695464, + optional=True, + ) + partner_asn = proto.Field( + proto.INT64, + number=438166149, + optional=True, + ) + partner_metadata = proto.Field( + proto.MESSAGE, + number=65908934, + optional=True, + message='InterconnectAttachmentPartnerMetadata', + ) + private_interconnect_info = proto.Field( + proto.MESSAGE, + number=237270531, + optional=True, + message='InterconnectAttachmentPrivateInfo', + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + router = proto.Field( + proto.STRING, + number=148608841, + optional=True, + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + vlan_tag8021q = proto.Field( + proto.INT32, + number=119927836, + optional=True, + ) + + +class InterconnectAttachmentAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InterconnectAttachmentAggregatedList.ItemsEntry]): + A list of InterconnectAttachmentsScopedList + resources. + kind (str): + [Output Only] Type of resource. Always + compute#interconnectAttachmentAggregatedList for aggregated + lists of interconnect attachments. 
+ + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='InterconnectAttachmentsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InterconnectAttachmentList(proto.Message): + r"""Response to the list request, and contains a list of + interconnect attachments. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InterconnectAttachment]): + A list of InterconnectAttachment resources. + kind (str): + [Output Only] Type of resource. 
Always + compute#interconnectAttachmentList for lists of interconnect + attachments. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InterconnectAttachment', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InterconnectAttachmentPartnerMetadata(proto.Message): + r"""Informational metadata about Partner attachments from Partners to + display to customers. These fields are propagated from + PARTNER_PROVIDER attachments to their corresponding PARTNER + attachments. + + Attributes: + interconnect_name (str): + Plain text name of the Interconnect this + attachment is connected to, as displayed in the + Partner's portal. For instance "Chicago 1". This + value may be validated to match approved Partner + values. 
+ + This field is a member of `oneof`_ ``_interconnect_name``. + partner_name (str): + Plain text name of the Partner providing this + attachment. This value may be validated to match + approved Partner values. + + This field is a member of `oneof`_ ``_partner_name``. + portal_url (str): + URL of the Partner's portal for this + Attachment. Partners may customise this to be a + deep link to the specific resource on the + Partner portal. This value may be validated to + match approved Partner values. + + This field is a member of `oneof`_ ``_portal_url``. + """ + + interconnect_name = proto.Field( + proto.STRING, + number=514963356, + optional=True, + ) + partner_name = proto.Field( + proto.STRING, + number=161747874, + optional=True, + ) + portal_url = proto.Field( + proto.STRING, + number=269182748, + optional=True, + ) + + +class InterconnectAttachmentPrivateInfo(proto.Message): + r"""Information for an interconnect attachment when this belongs + to an interconnect of type DEDICATED. + + Attributes: + tag8021q (int): + [Output Only] 802.1q encapsulation tag to be used for + traffic between Google and the customer, going to and from + this network and region. + + This field is a member of `oneof`_ ``_tag8021q``. + """ + + tag8021q = proto.Field( + proto.UINT32, + number=271820992, + optional=True, + ) + + +class InterconnectAttachmentsScopedList(proto.Message): + r""" + + Attributes: + interconnect_attachments (Sequence[google.cloud.compute_v1.types.InterconnectAttachment]): + A list of interconnect attachments contained + in this scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of addresses when the list is empty. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + interconnect_attachments = proto.RepeatedField( + proto.MESSAGE, + number=425388415, + message='InterconnectAttachment', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InterconnectCircuitInfo(proto.Message): + r"""Describes a single physical circuit between the Customer and + Google. CircuitInfo objects are created by Google, so all fields + are output only. + + Attributes: + customer_demarc_id (str): + Customer-side demarc ID for this circuit. + + This field is a member of `oneof`_ ``_customer_demarc_id``. + google_circuit_id (str): + Google-assigned unique ID for this circuit. + Assigned at circuit turn-up. + + This field is a member of `oneof`_ ``_google_circuit_id``. + google_demarc_id (str): + Google-side demarc ID for this circuit. + Assigned at circuit turn-up and provided by + Google to the customer in the LOA. + + This field is a member of `oneof`_ ``_google_demarc_id``. + """ + + customer_demarc_id = proto.Field( + proto.STRING, + number=28771859, + optional=True, + ) + google_circuit_id = proto.Field( + proto.STRING, + number=262014711, + optional=True, + ) + google_demarc_id = proto.Field( + proto.STRING, + number=448196270, + optional=True, + ) + + +class InterconnectDiagnostics(proto.Message): + r"""Diagnostics information about interconnect, contains detailed + and current technical information about Google's side of the + connection. + + Attributes: + arp_caches (Sequence[google.cloud.compute_v1.types.InterconnectDiagnosticsARPEntry]): + A list of InterconnectDiagnostics.ARPEntry + objects, describing individual neighbors + currently seen by the Google router in the ARP + cache for the Interconnect. This will be empty + when the Interconnect is not bundled. + links (Sequence[google.cloud.compute_v1.types.InterconnectDiagnosticsLinkStatus]): + A list of InterconnectDiagnostics.LinkStatus + objects, describing the status for each link on + the Interconnect. 
+ mac_address (str): + The MAC address of the Interconnect's bundle + interface. + + This field is a member of `oneof`_ ``_mac_address``. + """ + + arp_caches = proto.RepeatedField( + proto.MESSAGE, + number=414591761, + message='InterconnectDiagnosticsARPEntry', + ) + links = proto.RepeatedField( + proto.MESSAGE, + number=102977465, + message='InterconnectDiagnosticsLinkStatus', + ) + mac_address = proto.Field( + proto.STRING, + number=332540164, + optional=True, + ) + + +class InterconnectDiagnosticsARPEntry(proto.Message): + r"""Describing the ARP neighbor entries seen on this link + + Attributes: + ip_address (str): + The IP address of this ARP neighbor. + + This field is a member of `oneof`_ ``_ip_address``. + mac_address (str): + The MAC address of this ARP neighbor. + + This field is a member of `oneof`_ ``_mac_address``. + """ + + ip_address = proto.Field( + proto.STRING, + number=406272220, + optional=True, + ) + mac_address = proto.Field( + proto.STRING, + number=332540164, + optional=True, + ) + + +class InterconnectDiagnosticsLinkLACPStatus(proto.Message): + r""" + + Attributes: + google_system_id (str): + System ID of the port on Google's side of the + LACP exchange. + + This field is a member of `oneof`_ ``_google_system_id``. + neighbor_system_id (str): + System ID of the port on the neighbor's side + of the LACP exchange. + + This field is a member of `oneof`_ ``_neighbor_system_id``. + state (str): + The state of a LACP link, which can take one + of the following values: - ACTIVE: The link is + configured and active within the bundle. - + DETACHED: The link is not configured within the + bundle. This means that the rest of the object + should be empty. Check the State enum for the + list of possible values. + + This field is a member of `oneof`_ ``_state``. + """ + class State(proto.Enum): + r"""The state of a LACP link, which can take one of the following + values: - ACTIVE: The link is configured and active within the + bundle. 
- DETACHED: The link is not configured within the + bundle. This means that the rest of the object should be empty. + """ + UNDEFINED_STATE = 0 + ACTIVE = 314733318 + DETACHED = 216562546 + + google_system_id = proto.Field( + proto.STRING, + number=91210405, + optional=True, + ) + neighbor_system_id = proto.Field( + proto.STRING, + number=343821342, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + + +class InterconnectDiagnosticsLinkOpticalPower(proto.Message): + r""" + + Attributes: + state (str): + The status of the current value when compared to the warning + and alarm levels for the receiving or transmitting + transceiver. Possible states include: - OK: The value has + not crossed a warning threshold. - LOW_WARNING: The value + has crossed below the low warning threshold. - HIGH_WARNING: + The value has crossed above the high warning threshold. - + LOW_ALARM: The value has crossed below the low alarm + threshold. - HIGH_ALARM: The value has crossed above the + high alarm threshold. Check the State enum for the list of + possible values. + + This field is a member of `oneof`_ ``_state``. + value (float): + Value of the current receiving or + transmitting optical power, read in dBm. Take a + known good optical value, give it a 10% margin + and trigger warnings relative to that value. In + general, a -7dBm warning and a -11dBm alarm are + good optical value estimates for most links. + + This field is a member of `oneof`_ ``_value``. + """ + class State(proto.Enum): + r"""The status of the current value when compared to the warning and + alarm levels for the receiving or transmitting transceiver. Possible + states include: - OK: The value has not crossed a warning threshold. + - LOW_WARNING: The value has crossed below the low warning + threshold. - HIGH_WARNING: The value has crossed above the high + warning threshold. - LOW_ALARM: The value has crossed below the low + alarm threshold. 
- HIGH_ALARM: The value has crossed above the high + alarm threshold. + """ + UNDEFINED_STATE = 0 + HIGH_ALARM = 305363284 + HIGH_WARNING = 220984799 + LOW_ALARM = 316659046 + LOW_WARNING = 338793841 + OK = 2524 + + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + value = proto.Field( + proto.FLOAT, + number=111972721, + optional=True, + ) + + +class InterconnectDiagnosticsLinkStatus(proto.Message): + r""" + + Attributes: + arp_caches (Sequence[google.cloud.compute_v1.types.InterconnectDiagnosticsARPEntry]): + A list of InterconnectDiagnostics.ARPEntry + objects, describing the ARP neighbor entries + seen on this link. This will be empty if the + link is bundled + circuit_id (str): + The unique ID for this link assigned during + turn up by Google. + + This field is a member of `oneof`_ ``_circuit_id``. + google_demarc (str): + The Demarc address assigned by Google and + provided in the LoA. + + This field is a member of `oneof`_ ``_google_demarc``. + lacp_status (google.cloud.compute_v1.types.InterconnectDiagnosticsLinkLACPStatus): + + This field is a member of `oneof`_ ``_lacp_status``. + receiving_optical_power (google.cloud.compute_v1.types.InterconnectDiagnosticsLinkOpticalPower): + An InterconnectDiagnostics.LinkOpticalPower + object, describing the current value and status + of the received light level. + + This field is a member of `oneof`_ ``_receiving_optical_power``. + transmitting_optical_power (google.cloud.compute_v1.types.InterconnectDiagnosticsLinkOpticalPower): + An InterconnectDiagnostics.LinkOpticalPower + object, describing the current value and status + of the transmitted light level. + + This field is a member of `oneof`_ ``_transmitting_optical_power``. 
+ """ + + arp_caches = proto.RepeatedField( + proto.MESSAGE, + number=414591761, + message='InterconnectDiagnosticsARPEntry', + ) + circuit_id = proto.Field( + proto.STRING, + number=225180977, + optional=True, + ) + google_demarc = proto.Field( + proto.STRING, + number=51084, + optional=True, + ) + lacp_status = proto.Field( + proto.MESSAGE, + number=361210415, + optional=True, + message='InterconnectDiagnosticsLinkLACPStatus', + ) + receiving_optical_power = proto.Field( + proto.MESSAGE, + number=244717279, + optional=True, + message='InterconnectDiagnosticsLinkOpticalPower', + ) + transmitting_optical_power = proto.Field( + proto.MESSAGE, + number=459431197, + optional=True, + message='InterconnectDiagnosticsLinkOpticalPower', + ) + + +class InterconnectList(proto.Message): + r"""Response to the list request, and contains a list of + interconnects. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Interconnect]): + A list of Interconnect resources. + kind (str): + [Output Only] Type of resource. Always + compute#interconnectList for lists of interconnects. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Interconnect', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InterconnectLocation(proto.Message): + r"""Represents an Interconnect Attachment (VLAN) Location + resource. You can use this resource to find location details + about an Interconnect attachment (VLAN). For more information + about interconnect attachments, read Creating VLAN Attachments. + + Attributes: + address (str): + [Output Only] The postal address of the Point of Presence, + each line in the address is separated by a newline + character. + + This field is a member of `oneof`_ ``_address``. + availability_zone (str): + [Output Only] Availability zone for this + InterconnectLocation. Within a metropolitan area (metro), + maintenance will not be simultaneously scheduled in more + than one availability zone. Example: "zone1" or "zone2". + + This field is a member of `oneof`_ ``_availability_zone``. + city (str): + [Output Only] Metropolitan area designator that indicates + which city an interconnect is located. For example: + "Chicago, IL", "Amsterdam, Netherlands". + + This field is a member of `oneof`_ ``_city``. + continent (str): + [Output Only] Continent for this location, which can take + one of the following values: - AFRICA - ASIA_PAC - EUROPE - + NORTH_AMERICA - SOUTH_AMERICA Check the Continent enum for + the list of possible values. + + This field is a member of `oneof`_ ``_continent``. 
+ creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + [Output Only] An optional description of the resource. + + This field is a member of `oneof`_ ``_description``. + facility_provider (str): + [Output Only] The name of the provider for this facility + (e.g., EQUINIX). + + This field is a member of `oneof`_ ``_facility_provider``. + facility_provider_facility_id (str): + [Output Only] A provider-assigned Identifier for this + facility (e.g., Ashburn-DC1). + + This field is a member of `oneof`_ ``_facility_provider_facility_id``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#interconnectLocation for interconnect locations. + + This field is a member of `oneof`_ ``_kind``. + name (str): + [Output Only] Name of the resource. + + This field is a member of `oneof`_ ``_name``. + peeringdb_facility_id (str): + [Output Only] The peeringdb identifier for this facility + (corresponding with a netfac type in peeringdb). + + This field is a member of `oneof`_ ``_peeringdb_facility_id``. + region_infos (Sequence[google.cloud.compute_v1.types.InterconnectLocationRegionInfo]): + [Output Only] A list of InterconnectLocation.RegionInfo + objects, that describe parameters pertaining to the relation + between this InterconnectLocation and various Google Cloud + regions. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + status (str): + [Output Only] The status of this InterconnectLocation, which + can take one of the following values: - CLOSED: The + InterconnectLocation is closed and is unavailable for + provisioning new Interconnects. 
- AVAILABLE: The + InterconnectLocation is available for provisioning new + Interconnects. Check the Status enum for the list of + possible values. + + This field is a member of `oneof`_ ``_status``. + supports_pzs (bool): + [Output Only] Set to true for locations that support + physical zone separation. Defaults to false if the field is + not present. + + This field is a member of `oneof`_ ``_supports_pzs``. + """ + class Continent(proto.Enum): + r"""[Output Only] Continent for this location, which can take one of the + following values: - AFRICA - ASIA_PAC - EUROPE - NORTH_AMERICA - + SOUTH_AMERICA + """ + UNDEFINED_CONTINENT = 0 + AFRICA = 317443706 + ASIA_PAC = 119782269 + C_AFRICA = 71993846 + C_ASIA_PAC = 465668089 + C_EUROPE = 200369438 + C_NORTH_AMERICA = 275697048 + C_SOUTH_AMERICA = 397149792 + EUROPE = 445819298 + NORTH_AMERICA = 448015508 + SOUTH_AMERICA = 32597340 + + class Status(proto.Enum): + r"""[Output Only] The status of this InterconnectLocation, which can + take one of the following values: - CLOSED: The InterconnectLocation + is closed and is unavailable for provisioning new Interconnects. - + AVAILABLE: The InterconnectLocation is available for provisioning + new Interconnects. 
+ """ + UNDEFINED_STATUS = 0 + AVAILABLE = 442079913 + CLOSED = 380163436 + + address = proto.Field( + proto.STRING, + number=462920692, + optional=True, + ) + availability_zone = proto.Field( + proto.STRING, + number=158459920, + optional=True, + ) + city = proto.Field( + proto.STRING, + number=3053931, + optional=True, + ) + continent = proto.Field( + proto.STRING, + number=133442996, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + facility_provider = proto.Field( + proto.STRING, + number=533303309, + optional=True, + ) + facility_provider_facility_id = proto.Field( + proto.STRING, + number=87269125, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + peeringdb_facility_id = proto.Field( + proto.STRING, + number=536567094, + optional=True, + ) + region_infos = proto.RepeatedField( + proto.MESSAGE, + number=312194170, + message='InterconnectLocationRegionInfo', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + supports_pzs = proto.Field( + proto.BOOL, + number=83983214, + optional=True, + ) + + +class InterconnectLocationList(proto.Message): + r"""Response to the list request, and contains a list of + interconnect locations. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InterconnectLocation]): + A list of InterconnectLocation resources. + kind (str): + [Output Only] Type of resource. 
Always + compute#interconnectLocationList for lists of interconnect + locations. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InterconnectLocation', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class InterconnectLocationRegionInfo(proto.Message): + r"""Information about any potential InterconnectAttachments + between an Interconnect at a specific InterconnectLocation, and + a specific Cloud Region. + + Attributes: + expected_rtt_ms (int): + Expected round-trip time in milliseconds, + from this InterconnectLocation to a VM in this + region. + + This field is a member of `oneof`_ ``_expected_rtt_ms``. + location_presence (str): + Identifies the network presence of this + location. Check the LocationPresence enum for + the list of possible values. 
+ + This field is a member of `oneof`_ ``_location_presence``. + region (str): + URL for the region of this location. + + This field is a member of `oneof`_ ``_region``. + """ + class LocationPresence(proto.Enum): + r"""Identifies the network presence of this location.""" + UNDEFINED_LOCATION_PRESENCE = 0 + GLOBAL = 494663587 + LOCAL_REGION = 403535464 + LP_GLOBAL = 429584062 + LP_LOCAL_REGION = 488598851 + + expected_rtt_ms = proto.Field( + proto.INT64, + number=422543866, + optional=True, + ) + location_presence = proto.Field( + proto.STRING, + number=101517893, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + + +class InterconnectOutageNotification(proto.Message): + r"""Description of a planned outage on this Interconnect. + + Attributes: + affected_circuits (Sequence[str]): + If issue_type is IT_PARTIAL_OUTAGE, a list of the + Google-side circuit IDs that will be affected. + description (str): + A description about the purpose of the + outage. + + This field is a member of `oneof`_ ``_description``. + end_time (int): + Scheduled end time for the outage + (milliseconds since Unix epoch). + + This field is a member of `oneof`_ ``_end_time``. + issue_type (str): + Form this outage is expected to take, which can take one of + the following values: - OUTAGE: The Interconnect may be + completely out of service for some or all of the specified + window. - PARTIAL_OUTAGE: Some circuits comprising the + Interconnect as a whole should remain up, but with reduced + bandwidth. Note that the versions of this enum prefixed with + "IT_" have been deprecated in favor of the unprefixed + values. Check the IssueType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_issue_type``. + name (str): + Unique identifier for this outage + notification. + + This field is a member of `oneof`_ ``_name``. 
+ source (str): + The party that generated this notification, which can take + the following value: - GOOGLE: this notification as + generated by Google. Note that the value of NSRC_GOOGLE has + been deprecated in favor of GOOGLE. Check the Source enum + for the list of possible values. + + This field is a member of `oneof`_ ``_source``. + start_time (int): + Scheduled start time for the outage + (milliseconds since Unix epoch). + + This field is a member of `oneof`_ ``_start_time``. + state (str): + State of this notification, which can take one of the + following values: - ACTIVE: This outage notification is + active. The event could be in the past, present, or future. + See start_time and end_time for scheduling. - CANCELLED: The + outage associated with this notification was cancelled + before the outage was due to start. - COMPLETED: The outage + associated with this notification is complete. Note that the + versions of this enum prefixed with "NS_" have been + deprecated in favor of the unprefixed values. Check the + State enum for the list of possible values. + + This field is a member of `oneof`_ ``_state``. + """ + class IssueType(proto.Enum): + r"""Form this outage is expected to take, which can take one of the + following values: - OUTAGE: The Interconnect may be completely out + of service for some or all of the specified window. - + PARTIAL_OUTAGE: Some circuits comprising the Interconnect as a whole + should remain up, but with reduced bandwidth. Note that the versions + of this enum prefixed with "IT_" have been deprecated in favor of + the unprefixed values. + """ + UNDEFINED_ISSUE_TYPE = 0 + IT_OUTAGE = 175779973 + IT_PARTIAL_OUTAGE = 92103971 + OUTAGE = 195285745 + PARTIAL_OUTAGE = 147053455 + + class Source(proto.Enum): + r"""The party that generated this notification, which can take the + following value: - GOOGLE: this notification as generated by Google. + Note that the value of NSRC_GOOGLE has been deprecated in favor of + GOOGLE. 
+ """ + UNDEFINED_SOURCE = 0 + GOOGLE = 497439289 + NSRC_GOOGLE = 510574562 + + class State(proto.Enum): + r"""State of this notification, which can take one of the following + values: - ACTIVE: This outage notification is active. The event + could be in the past, present, or future. See start_time and + end_time for scheduling. - CANCELLED: The outage associated with + this notification was cancelled before the outage was due to start. + - COMPLETED: The outage associated with this notification is + complete. Note that the versions of this enum prefixed with "NS_" + have been deprecated in favor of the unprefixed values. + """ + UNDEFINED_STATE = 0 + ACTIVE = 314733318 + CANCELLED = 41957681 + COMPLETED = 309921323 + NS_ACTIVE = 252563136 + NS_CANCELED = 506579411 + + affected_circuits = proto.RepeatedField( + proto.STRING, + number=177717013, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + end_time = proto.Field( + proto.INT64, + number=114938801, + optional=True, + ) + issue_type = proto.Field( + proto.STRING, + number=369639136, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + source = proto.Field( + proto.STRING, + number=177235995, + optional=True, + ) + start_time = proto.Field( + proto.INT64, + number=37467274, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + + +class InterconnectsGetDiagnosticsResponse(proto.Message): + r"""Response for the InterconnectsGetDiagnosticsRequest. + + Attributes: + result (google.cloud.compute_v1.types.InterconnectDiagnostics): + + This field is a member of `oneof`_ ``_result``. + """ + + result = proto.Field( + proto.MESSAGE, + number=139315229, + optional=True, + message='InterconnectDiagnostics', + ) + + +class InvalidateCacheUrlMapRequest(proto.Message): + r"""A request message for UrlMaps.InvalidateCache. See the method + description for details. 
+
+    Attributes:
+        cache_invalidation_rule_resource (google.cloud.compute_v1.types.CacheInvalidationRule):
+            The body resource for this request
+        project (str):
+            Project ID for this request.
+        request_id (str):
+            An optional request ID to identify requests.
+            Specify a unique request ID so that if you must
+            retry your request, the server will know to
+            ignore the request if it has already been
+            completed. For example, consider a situation
+            where you make an initial request and the
+            request times out. If you make the request again
+            with the same request ID, the server can check
+            if the original operation with the same request
+            ID was received, and if so, will ignore the
+            second request. This prevents clients from
+            accidentally creating duplicate commitments. The
+            request ID must be a valid UUID with the
+            exception that zero UUID is not supported (
+            00000000-0000-0000-0000-000000000000).
+
+            This field is a member of `oneof`_ ``_request_id``.
+        url_map (str):
+            Name of the UrlMap scoping this request.
+    """
+
+    cache_invalidation_rule_resource = proto.Field(
+        proto.MESSAGE,
+        number=312795565,
+        message='CacheInvalidationRule',
+    )
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    url_map = proto.Field(
+        proto.STRING,
+        number=367020684,
+    )
+
+
+class Items(proto.Message):
+    r"""Metadata
+
+    Attributes:
+        key (str):
+            Key for the metadata entry. Keys must conform to the
+            following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes
+            in length. This is reflected as part of a URL in the
+            metadata server. Additionally, to avoid ambiguity, keys must
+            not conflict with any other metadata keys for the project.
+
+            This field is a member of `oneof`_ ``_key``.
+        value (str):
+            Value for the metadata entry. These are
+            free-form strings, and only have meaning as
+            interpreted by the image running in the
+            instance.
The only restriction placed on values + is that their size must be less than or equal to + 262144 bytes (256 KiB). + + This field is a member of `oneof`_ ``_value``. + """ + + key = proto.Field( + proto.STRING, + number=106079, + optional=True, + ) + value = proto.Field( + proto.STRING, + number=111972721, + optional=True, + ) + + +class License(proto.Message): + r"""Represents a License resource. A License represents billing and + aggregate usage data for public and marketplace images. *Caution* + This resource is intended for use only by third-party partners who + are creating Cloud Marketplace images. + + Attributes: + charges_use_fee (bool): + [Output Only] Deprecated. This field no longer reflects + whether a license charges a usage fee. + + This field is a member of `oneof`_ ``_charges_use_fee``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional textual description of the + resource; provided by the client when the + resource is created. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. Always compute#license for + licenses. + + This field is a member of `oneof`_ ``_kind``. + license_code (int): + [Output Only] The unique code used to attach this license to + images, snapshots, and disks. + + This field is a member of `oneof`_ ``_license_code``. + name (str): + Name of the resource. The name must be 1-63 + characters long and comply with RFC1035. + + This field is a member of `oneof`_ ``_name``. + resource_requirements (google.cloud.compute_v1.types.LicenseResourceRequirements): + + This field is a member of `oneof`_ ``_resource_requirements``. + self_link (str): + [Output Only] Server-defined URL for the resource. 
+ + This field is a member of `oneof`_ ``_self_link``. + transferable (bool): + If false, licenses will not be copied from + the source resource when creating an image from + a disk, disk from snapshot, or snapshot from + disk. + + This field is a member of `oneof`_ ``_transferable``. + """ + + charges_use_fee = proto.Field( + proto.BOOL, + number=372412622, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + license_code = proto.Field( + proto.UINT64, + number=1467179, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + resource_requirements = proto.Field( + proto.MESSAGE, + number=214292769, + optional=True, + message='LicenseResourceRequirements', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + transferable = proto.Field( + proto.BOOL, + number=4349893, + optional=True, + ) + + +class LicenseCode(proto.Message): + r"""Represents a License Code resource. A License Code is a unique + identifier used to represent a license resource. *Caution* This + resource is intended for use only by third-party partners who are + creating Cloud Marketplace images. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + [Output Only] Description of this License Code. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. 
Always compute#licenseCode + for licenses. + + This field is a member of `oneof`_ ``_kind``. + license_alias (Sequence[google.cloud.compute_v1.types.LicenseCodeLicenseAlias]): + [Output Only] URL and description aliases of Licenses with + the same License Code. + name (str): + [Output Only] Name of the resource. The name is 1-20 + characters long and must be a valid 64 bit integer. + + This field is a member of `oneof`_ ``_name``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + state (str): + [Output Only] Current state of this License Code. Check the + State enum for the list of possible values. + + This field is a member of `oneof`_ ``_state``. + transferable (bool): + [Output Only] If true, the license will remain attached when + creating images or snapshots from disks. Otherwise, the + license is not transferred. + + This field is a member of `oneof`_ ``_transferable``. + """ + class State(proto.Enum): + r"""[Output Only] Current state of this License Code.""" + UNDEFINED_STATE = 0 + DISABLED = 516696700 + ENABLED = 182130465 + RESTRICTED = 261551195 + STATE_UNSPECIFIED = 470755401 + TERMINATED = 250018339 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + license_alias = proto.RepeatedField( + proto.MESSAGE, + number=43550930, + message='LicenseCodeLicenseAlias', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + transferable = proto.Field( + proto.BOOL, + number=4349893, + optional=True, + ) + + +class 
LicenseCodeLicenseAlias(proto.Message): + r""" + + Attributes: + description (str): + [Output Only] Description of this License Code. + + This field is a member of `oneof`_ ``_description``. + self_link (str): + [Output Only] URL of license corresponding to this License + Code. + + This field is a member of `oneof`_ ``_self_link``. + """ + + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + + +class LicenseResourceCommitment(proto.Message): + r"""Commitment for a particular license resource. + + Attributes: + amount (int): + The number of licenses purchased. + + This field is a member of `oneof`_ ``_amount``. + cores_per_license (str): + Specifies the core range of the instance for + which this license applies. + + This field is a member of `oneof`_ ``_cores_per_license``. + license_ (str): + Any applicable license URI. + + This field is a member of `oneof`_ ``_license``. + """ + + amount = proto.Field( + proto.INT64, + number=196759640, + optional=True, + ) + cores_per_license = proto.Field( + proto.STRING, + number=32482324, + optional=True, + ) + license_ = proto.Field( + proto.STRING, + number=166757441, + optional=True, + ) + + +class LicenseResourceRequirements(proto.Message): + r""" + + Attributes: + min_guest_cpu_count (int): + Minimum number of guest cpus required to use + the Instance. Enforced at Instance creation and + Instance start. + + This field is a member of `oneof`_ ``_min_guest_cpu_count``. + min_memory_mb (int): + Minimum memory required to use the Instance. + Enforced at Instance creation and Instance + start. + + This field is a member of `oneof`_ ``_min_memory_mb``. 
+ """ + + min_guest_cpu_count = proto.Field( + proto.INT32, + number=477964836, + optional=True, + ) + min_memory_mb = proto.Field( + proto.INT32, + number=504785894, + optional=True, + ) + + +class LicensesListResponse(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.License]): + A list of License resources. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='License', + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ListAcceleratorTypesRequest(proto.Message): + r"""A request message for AcceleratorTypes.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. 
The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). 
Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListAddressesRequest(proto.Message): + r"""A request message for Addresses.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. 
You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. 
+ + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListAssociationsFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.ListAssociations. See + the method description for details. + + Attributes: + target_resource (str): + The target resource to list associations. It + is an organization, or a folder. + + This field is a member of `oneof`_ ``_target_resource``. + """ + + target_resource = proto.Field( + proto.STRING, + number=467318524, + optional=True, + ) + + +class ListAutoscalersRequest(proto.Message): + r"""A request message for Autoscalers.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. 
For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. 
+ page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + Name of the zone for this request. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListAvailableFeaturesSslPoliciesRequest(proto.Message): + r"""A request message for SslPolicies.ListAvailableFeatures. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. 
You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. 
The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListBackendBucketsRequest(proto.Message): + r"""A request message for BackendBuckets.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListBackendServicesRequest(proto.Message): + r"""A request message for BackendServices.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListDiskTypesRequest(proto.Message): + r"""A request message for DiskTypes.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListDisksRequest(proto.Message): + r"""A request message for Disks.List. See the method description + for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListErrorsInstanceGroupManagersRequest(proto.Message): + r"""A request message for InstanceGroupManagers.ListErrors. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ instance_group_manager (str): + The name of the managed instance group. It must be a string + that meets the requirements in RFC1035, or an unsigned long + integer: must match regexp pattern: + (?:`a-z `__?)|1-9{0,19}. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the managed + instance group is located. It should conform to + RFC1035. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListErrorsRegionInstanceGroupManagersRequest(proto.Message): + r"""A request message for RegionInstanceGroupManagers.ListErrors. + See the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + instance_group_manager (str): + The name of the managed instance group. It must be a string + that meets the requirements in RFC1035, or an unsigned long + integer: must match regexp pattern: + (?:`a-z `__?)|1-9{0,19}. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. This + should conform to RFC1035. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. 
+ + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListExternalVpnGatewaysRequest(proto.Message): + r"""A request message for ExternalVpnGateways.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListFirewallPoliciesRequest(proto.Message): + r"""A request message for FirewallPolicies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + parent_id (str): + Parent ID for this request. + + This field is a member of `oneof`_ ``_parent_id``. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + parent_id = proto.Field( + proto.STRING, + number=459714768, + optional=True, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListFirewallsRequest(proto.Message): + r"""A request message for Firewalls.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListForwardingRulesRequest(proto.Message): + r"""A request message for ForwardingRules.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListGlobalAddressesRequest(proto.Message): + r"""A request message for GlobalAddresses.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListGlobalForwardingRulesRequest(proto.Message): + r"""A request message for GlobalForwardingRules.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListGlobalNetworkEndpointGroupsRequest(proto.Message): + r"""A request message for GlobalNetworkEndpointGroups.List. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListGlobalOperationsRequest(proto.Message): + r"""A request message for GlobalOperations.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListGlobalOrganizationOperationsRequest(proto.Message): + r"""A request message for GlobalOrganizationOperations.List. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + parent_id (str): + Parent ID for this request. + + This field is a member of `oneof`_ ``_parent_id``. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + parent_id = proto.Field( + proto.STRING, + number=459714768, + optional=True, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListGlobalPublicDelegatedPrefixesRequest(proto.Message): + r"""A request message for GlobalPublicDelegatedPrefixes.List. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListHealthChecksRequest(proto.Message): + r"""A request message for HealthChecks.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListImagesRequest(proto.Message): + r"""A request message for Images.List. See the method description + for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListInstanceGroupManagersRequest(proto.Message): + r"""A request message for InstanceGroupManagers.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the managed + instance group is located. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListInstanceGroupsRequest(proto.Message): + r"""A request message for InstanceGroups.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the instance group + is located. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListInstanceTemplatesRequest(proto.Message): + r"""A request message for InstanceTemplates.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListInstancesInstanceGroupsRequest(proto.Message): + r"""A request message for InstanceGroups.ListInstances. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ instance_group (str): + The name of the instance group from which you + want to generate a list of included instances. + instance_groups_list_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupsListInstancesRequest): + The body resource for this request + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the instance group + is located. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + instance_groups_list_instances_request_resource = proto.Field( + proto.MESSAGE, + number=476255263, + message='InstanceGroupsListInstancesRequest', + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListInstancesRegionInstanceGroupsRequest(proto.Message): + r"""A request message for RegionInstanceGroups.ListInstances. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. 
+ However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + instance_group (str): + Name of the regional instance group for which + we want to list the instances. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. 
+ region_instance_groups_list_instances_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupsListInstancesRequest): + The body resource for this request + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_groups_list_instances_request_resource = proto.Field( + proto.MESSAGE, + number=48239828, + message='RegionInstanceGroupsListInstancesRequest', + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListInstancesRequest(proto.Message): + r"""A request message for Instances.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. 
+ project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListInterconnectAttachmentsRequest(proto.Message): + r"""A request message for InterconnectAttachments.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListInterconnectLocationsRequest(proto.Message): + r"""A request message for InterconnectLocations.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListInterconnectsRequest(proto.Message): + r"""A request message for Interconnects.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListLicensesRequest(proto.Message): + r"""A request message for Licenses.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListMachineTypesRequest(proto.Message): + r"""A request message for MachineTypes.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListManagedInstancesInstanceGroupManagersRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.ListManagedInstances. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + instance_group_manager (str): + The name of the managed instance group. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the managed + instance group is located. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListManagedInstancesRegionInstanceGroupManagersRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.ListManagedInstances. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + instance_group_manager (str): + The name of the managed instance group. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListNetworkEndpointGroupsRequest(proto.Message): + r"""A request message for NetworkEndpointGroups.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the network + endpoint group is located. It should comply with + RFC1035. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest(proto.Message): + r"""A request message for + GlobalNetworkEndpointGroups.ListNetworkEndpoints. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + network_endpoint_group (str): + The name of the network endpoint group from + which you want to generate a list of included + network endpoints. It should comply with + RFC1035. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListNetworkEndpointsNetworkEndpointGroupsRequest(proto.Message): + r"""A request message for + NetworkEndpointGroups.ListNetworkEndpoints. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + network_endpoint_group (str): + The name of the network endpoint group from + which you want to generate a list of included + network endpoints. It should comply with + RFC1035. + network_endpoint_groups_list_endpoints_request_resource (google.cloud.compute_v1.types.NetworkEndpointGroupsListEndpointsRequest): + The body resource for this request + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. 
+ + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the network + endpoint group is located. It should comply with + RFC1035. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + network_endpoint_group = proto.Field( + proto.STRING, + number=433907078, + ) + network_endpoint_groups_list_endpoints_request_resource = proto.Field( + proto.MESSAGE, + number=59493390, + message='NetworkEndpointGroupsListEndpointsRequest', + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListNetworksRequest(proto.Message): + r"""A request message for Networks.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. 
For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListNodeGroupsRequest(proto.Message): + r"""A request message for NodeGroups.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListNodeTemplatesRequest(proto.Message): + r"""A request message for NodeTemplates.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListNodeTypesRequest(proto.Message): + r"""A request message for NodeTypes.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListNodesNodeGroupsRequest(proto.Message): + r"""A request message for NodeGroups.ListNodes. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + node_group (str): + Name of the NodeGroup resource whose nodes + you want to list. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + node_group = proto.Field( + proto.STRING, + number=469958146, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListPacketMirroringsRequest(proto.Message): + r"""A request message for PacketMirrorings.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListPeeringRoutesNetworksRequest(proto.Message): + r"""A request message for Networks.ListPeeringRoutes. See the + method description for details. + + Attributes: + direction (str): + The direction of the exchanged routes. + Check the Direction enum for the list of + possible values. + + This field is a member of `oneof`_ ``_direction``. + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + network (str): + Name of the network for this request. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + peering_name (str): + The response will show routes exchanged over + the given peering connection. + + This field is a member of `oneof`_ ``_peering_name``. + project (str): + Project ID for this request. + region (str): + The region of the request. The response will + include all subnet routes, static routes and + dynamic routes in the region. + + This field is a member of `oneof`_ ``_region``. 
+ return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + class Direction(proto.Enum): + r"""The direction of the exchanged routes.""" + UNDEFINED_DIRECTION = 0 + INCOMING = 338552870 + OUTGOING = 307438444 + + direction = proto.Field( + proto.STRING, + number=111150975, + optional=True, + ) + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + peering_name = proto.Field( + proto.STRING, + number=249571370, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListPerInstanceConfigsInstanceGroupManagersRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.ListPerInstanceConfigs. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. 
Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone where the managed + instance group is located. It should conform to + RFC1035. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListPerInstanceConfigsRegionInstanceGroupManagersRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.ListPerInstanceConfigs. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. 
Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request, + should conform to RFC1035. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListPreconfiguredExpressionSetsSecurityPoliciesRequest(proto.Message): + r"""A request message for + SecurityPolicies.ListPreconfiguredExpressionSets. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. 
For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. 
+ project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListPublicAdvertisedPrefixesRequest(proto.Message): + r"""A request message for PublicAdvertisedPrefixes.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. 
+ However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListPublicDelegatedPrefixesRequest(proto.Message): + r"""A request message for PublicDelegatedPrefixes.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region of this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListReferrersInstancesRequest(proto.Message): + r"""A request message for Instances.ListReferrers. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ instance (str): + Name of the target instance scoping this + request, or '-' if the request should span over + all instances in the container. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + The name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListRegionAutoscalersRequest(proto.Message): + r"""A request message for RegionAutoscalers.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionBackendServicesRequest(proto.Message): + r"""A request message for RegionBackendServices.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionCommitmentsRequest(proto.Message): + r"""A request message for RegionCommitments.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionDiskTypesRequest(proto.Message): + r"""A request message for RegionDiskTypes.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionDisksRequest(proto.Message): + r"""A request message for RegionDisks.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionHealthCheckServicesRequest(proto.Message): + r"""A request message for RegionHealthCheckServices.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionHealthChecksRequest(proto.Message): + r"""A request message for RegionHealthChecks.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionInstanceGroupManagersRequest(proto.Message): + r"""A request message for RegionInstanceGroupManagers.List. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionInstanceGroupsRequest(proto.Message): + r"""A request message for RegionInstanceGroups.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionNetworkEndpointGroupsRequest(proto.Message): + r"""A request message for RegionNetworkEndpointGroups.List. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + The name of the region where the network + endpoint group is located. It should comply with + RFC1035. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionNotificationEndpointsRequest(proto.Message): + r"""A request message for RegionNotificationEndpoints.List. See + the method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionOperationsRequest(proto.Message): + r"""A request message for RegionOperations.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionSslCertificatesRequest(proto.Message): + r"""A request message for RegionSslCertificates.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionTargetHttpProxiesRequest(proto.Message): + r"""A request message for RegionTargetHttpProxies.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionTargetHttpsProxiesRequest(proto.Message): + r"""A request message for RegionTargetHttpsProxies.List. See the + method description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionUrlMapsRequest(proto.Message): + r"""A request message for RegionUrlMaps.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRegionsRequest(proto.Message): + r"""A request message for Regions.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListReservationsRequest(proto.Message): + r"""A request message for Reservations.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + Name of the zone for this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListResourcePoliciesRequest(proto.Message): + r"""A request message for ResourcePolicies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRoutersRequest(proto.Message): + r"""A request message for Routers.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListRoutesRequest(proto.Message): + r"""A request message for Routes.List. See the method description + for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListSecurityPoliciesRequest(proto.Message): + r"""A request message for SecurityPolicies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListServiceAttachmentsRequest(proto.Message): + r"""A request message for ServiceAttachments.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region of this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListSnapshotsRequest(proto.Message): + r"""A request message for Snapshots.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListSslCertificatesRequest(proto.Message): + r"""A request message for SslCertificates.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListSslPoliciesRequest(proto.Message): + r"""A request message for SslPolicies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListSubnetworksRequest(proto.Message): + r"""A request message for Subnetworks.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListTargetGrpcProxiesRequest(proto.Message): + r"""A request message for TargetGrpcProxies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListTargetHttpProxiesRequest(proto.Message): + r"""A request message for TargetHttpProxies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListTargetHttpsProxiesRequest(proto.Message): + r"""A request message for TargetHttpsProxies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListTargetInstancesRequest(proto.Message): + r"""A request message for TargetInstances.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + Name of the zone scoping this request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListTargetPoolsRequest(proto.Message): + r"""A request message for TargetPools.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListTargetSslProxiesRequest(proto.Message): + r"""A request message for TargetSslProxies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListTargetTcpProxiesRequest(proto.Message): + r"""A request message for TargetTcpProxies.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListTargetVpnGatewaysRequest(proto.Message): + r"""A request message for TargetVpnGateways.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListUrlMapsRequest(proto.Message): + r"""A request message for UrlMaps.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListUsableSubnetworksRequest(proto.Message): + r"""A request message for Subnetworks.ListUsable. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListVpnGatewaysRequest(proto.Message): + r"""A request message for VpnGateways.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. 
If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListVpnTunnelsRequest(proto.Message): + r"""A request message for VpnTunnels.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListXpnHostsProjectsRequest(proto.Message): + r"""A request message for Projects.ListXpnHosts. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + projects_list_xpn_hosts_request_resource (google.cloud.compute_v1.types.ProjectsListXpnHostsRequest): + The body resource for this request + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + projects_list_xpn_hosts_request_resource = proto.Field( + proto.MESSAGE, + number=238266391, + message='ProjectsListXpnHostsRequest', + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class ListZoneOperationsRequest(proto.Message): + r"""A request message for ZoneOperations.List. See the method + description for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. 
For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. + max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. + zone (str): + Name of the zone for request. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ListZonesRequest(proto.Message): + r"""A request message for Zones.List. See the method description + for details. + + Attributes: + filter (str): + A filter expression that filters resources listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be either ``=``, + ``!=``, ``>``, or ``<``. For example, if you are filtering + Compute Engine instances, you can exclude instances named + ``example-instance`` by specifying + ``name != example-instance``. You can also filter nested + fields. For example, you could specify + ``scheduling.automaticRestart = false`` to include instances + only if they are not scheduled for automatic restarts. You + can use filtering on nested fields to filter based on + resource labels. To filter on multiple expressions, provide + each separate expression within parentheses. For example: + ``(scheduling.automaticRestart = true) (cpuPlatform = "Intel Skylake")`` + By default, each expression is an ``AND`` expression. + However, you can include ``AND`` and ``OR`` expressions + explicitly. For example: + ``(cpuPlatform = "Intel Skylake") OR (cpuPlatform = "Intel Broadwell") AND (scheduling.automaticRestart = true)`` + + This field is a member of `oneof`_ ``_filter``. 
+ max_results (int): + The maximum number of results per page that should be + returned. If the number of available results is larger than + ``maxResults``, Compute Engine returns a ``nextPageToken`` + that can be used to get the next page of results in + subsequent list requests. Acceptable values are ``0`` to + ``500``, inclusive. (Default: ``500``) + + This field is a member of `oneof`_ ``_max_results``. + order_by (str): + Sorts list results by a certain order. By default, results + are returned in alphanumerical order based on the resource + name. You can also sort results in descending order based on + the creation timestamp using + ``orderBy="creationTimestamp desc"``. This sorts results + based on the ``creationTimestamp`` field in reverse + chronological order (newest result first). Use this to sort + resources like operations so that the newest operation is + returned first. Currently, only sorting by ``name`` or + ``creationTimestamp desc`` is supported. + + This field is a member of `oneof`_ ``_order_by``. + page_token (str): + Specifies a page token to use. Set ``pageToken`` to the + ``nextPageToken`` returned by a previous list request to get + the next page of results. + + This field is a member of `oneof`_ ``_page_token``. + project (str): + Project ID for this request. + return_partial_success (bool): + Opt-in for partial success behavior which + provides partial results in case of failure. The + default value is false. + + This field is a member of `oneof`_ ``_return_partial_success``. 
+ """ + + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + max_results = proto.Field( + proto.UINT32, + number=54715419, + optional=True, + ) + order_by = proto.Field( + proto.STRING, + number=160562920, + optional=True, + ) + page_token = proto.Field( + proto.STRING, + number=19994697, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + return_partial_success = proto.Field( + proto.BOOL, + number=517198390, + optional=True, + ) + + +class LocalDisk(proto.Message): + r""" + + Attributes: + disk_count (int): + Specifies the number of such disks. + + This field is a member of `oneof`_ ``_disk_count``. + disk_size_gb (int): + Specifies the size of the disk in base-2 GB. + + This field is a member of `oneof`_ ``_disk_size_gb``. + disk_type (str): + Specifies the desired disk type on the node. + This disk type must be a local storage type + (e.g.: local-ssd). Note that for nodeTemplates, + this should be the name of the disk type and not + its URL. + + This field is a member of `oneof`_ ``_disk_type``. + """ + + disk_count = proto.Field( + proto.INT32, + number=182933485, + optional=True, + ) + disk_size_gb = proto.Field( + proto.INT32, + number=316263735, + optional=True, + ) + disk_type = proto.Field( + proto.STRING, + number=93009052, + optional=True, + ) + + +class LocationPolicy(proto.Message): + r"""Configuration for location policy among multiple possible + locations (e.g. preferences for zone selection among zones in a + single region). + + Attributes: + locations (Sequence[google.cloud.compute_v1.types.LocationPolicy.LocationsEntry]): + Location configurations mapped by location + name. Currently only zone names are supported + and must be represented as valid internal URLs, + such as zones/us-central1-a. 
+ """ + + locations = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=413423454, + message='LocationPolicyLocation', + ) + + +class LocationPolicyLocation(proto.Message): + r""" + + Attributes: + preference (str): + Preference for a given location: ALLOW or + DENY. Check the Preference enum for the list of + possible values. + + This field is a member of `oneof`_ ``_preference``. + """ + class Preference(proto.Enum): + r"""Preference for a given location: ALLOW or DENY.""" + UNDEFINED_PREFERENCE = 0 + ALLOW = 62368553 + DENY = 2094604 + PREFERENCE_UNSPECIFIED = 496219571 + + preference = proto.Field( + proto.STRING, + number=150781147, + optional=True, + ) + + +class LogConfig(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + cloud_audit (google.cloud.compute_v1.types.LogConfigCloudAuditOptions): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_cloud_audit``. + counter (google.cloud.compute_v1.types.LogConfigCounterOptions): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_counter``. + data_access (google.cloud.compute_v1.types.LogConfigDataAccessOptions): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_data_access``. + """ + + cloud_audit = proto.Field( + proto.MESSAGE, + number=412852561, + optional=True, + message='LogConfigCloudAuditOptions', + ) + counter = proto.Field( + proto.MESSAGE, + number=420959740, + optional=True, + message='LogConfigCounterOptions', + ) + data_access = proto.Field( + proto.MESSAGE, + number=286633881, + optional=True, + message='LogConfigDataAccessOptions', + ) + + +class LogConfigCloudAuditOptions(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + authorization_logging_options (google.cloud.compute_v1.types.AuthorizationLoggingOptions): + This is deprecated and has no effect. Do not + use. 
+ + This field is a member of `oneof`_ ``_authorization_logging_options``. + log_name (str): + This is deprecated and has no effect. Do not + use. Check the LogName enum for the list of + possible values. + + This field is a member of `oneof`_ ``_log_name``. + """ + class LogName(proto.Enum): + r"""This is deprecated and has no effect. Do not use.""" + UNDEFINED_LOG_NAME = 0 + ADMIN_ACTIVITY = 427503135 + DATA_ACCESS = 238070681 + UNSPECIFIED_LOG_NAME = 410515182 + + authorization_logging_options = proto.Field( + proto.MESSAGE, + number=217861624, + optional=True, + message='AuthorizationLoggingOptions', + ) + log_name = proto.Field( + proto.STRING, + number=402913958, + optional=True, + ) + + +class LogConfigCounterOptions(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + custom_fields (Sequence[google.cloud.compute_v1.types.LogConfigCounterOptionsCustomField]): + This is deprecated and has no effect. Do not + use. + field (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_field``. + metric (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_metric``. + """ + + custom_fields = proto.RepeatedField( + proto.MESSAGE, + number=249651015, + message='LogConfigCounterOptionsCustomField', + ) + field = proto.Field( + proto.STRING, + number=97427706, + optional=True, + ) + metric = proto.Field( + proto.STRING, + number=533067184, + optional=True, + ) + + +class LogConfigCounterOptionsCustomField(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + name (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_name``. + value (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_value``. 
+ """ + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + value = proto.Field( + proto.STRING, + number=111972721, + optional=True, + ) + + +class LogConfigDataAccessOptions(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + log_mode (str): + This is deprecated and has no effect. Do not + use. Check the LogMode enum for the list of + possible values. + + This field is a member of `oneof`_ ``_log_mode``. + """ + class LogMode(proto.Enum): + r"""This is deprecated and has no effect. Do not use.""" + UNDEFINED_LOG_MODE = 0 + LOG_FAIL_CLOSED = 360469778 + LOG_MODE_UNSPECIFIED = 88160822 + + log_mode = proto.Field( + proto.STRING, + number=402897342, + optional=True, + ) + + +class MachineType(proto.Message): + r"""Represents a Machine Type resource. You can use specific + machine types for your VM instances based on performance and + pricing requirements. For more information, read Machine Types. + + Attributes: + accelerators (Sequence[google.cloud.compute_v1.types.Accelerators]): + [Output Only] A list of accelerator configurations assigned + to this machine type. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + deprecated (google.cloud.compute_v1.types.DeprecationStatus): + [Output Only] The deprecation status associated with this + machine type. Only applicable if the machine type is + unavailable. + + This field is a member of `oneof`_ ``_deprecated``. + description (str): + [Output Only] An optional textual description of the + resource. + + This field is a member of `oneof`_ ``_description``. + guest_cpus (int): + [Output Only] The number of virtual CPUs that are available + to the instance. + + This field is a member of `oneof`_ ``_guest_cpus``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. 
+ + This field is a member of `oneof`_ ``_id``. + image_space_gb (int): + [Deprecated] This property is deprecated and will never be + populated with any relevant values. + + This field is a member of `oneof`_ ``_image_space_gb``. + is_shared_cpu (bool): + [Output Only] Whether this machine type has a shared CPU. + See Shared-core machine types for more information. + + This field is a member of `oneof`_ ``_is_shared_cpu``. + kind (str): + [Output Only] The type of the resource. Always + compute#machineType for machine types. + + This field is a member of `oneof`_ ``_kind``. + maximum_persistent_disks (int): + [Output Only] Maximum persistent disks allowed. + + This field is a member of `oneof`_ ``_maximum_persistent_disks``. + maximum_persistent_disks_size_gb (int): + [Output Only] Maximum total persistent disks size (GB) + allowed. + + This field is a member of `oneof`_ ``_maximum_persistent_disks_size_gb``. + memory_mb (int): + [Output Only] The amount of physical memory available to the + instance, defined in MB. + + This field is a member of `oneof`_ ``_memory_mb``. + name (str): + [Output Only] Name of the resource. + + This field is a member of `oneof`_ ``_name``. + scratch_disks (Sequence[google.cloud.compute_v1.types.ScratchDisks]): + [Output Only] A list of extended scratch disks assigned to + the instance. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + zone (str): + [Output Only] The name of the zone where the machine type + resides, such as us-central1-a. + + This field is a member of `oneof`_ ``_zone``. 
+ """ + + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=269577064, + message='Accelerators', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + deprecated = proto.Field( + proto.MESSAGE, + number=515138995, + optional=True, + message='DeprecationStatus', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + guest_cpus = proto.Field( + proto.INT32, + number=393356754, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + image_space_gb = proto.Field( + proto.INT32, + number=75331864, + optional=True, + ) + is_shared_cpu = proto.Field( + proto.BOOL, + number=521399555, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + maximum_persistent_disks = proto.Field( + proto.INT32, + number=496220941, + optional=True, + ) + maximum_persistent_disks_size_gb = proto.Field( + proto.INT64, + number=154274471, + optional=True, + ) + memory_mb = proto.Field( + proto.INT32, + number=116001171, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + scratch_disks = proto.RepeatedField( + proto.MESSAGE, + number=480778481, + message='ScratchDisks', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class MachineTypeAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.MachineTypeAggregatedList.ItemsEntry]): + A list of MachineTypesScopedList resources. + kind (str): + [Output Only] Type of resource. Always + compute#machineTypeAggregatedList for aggregated lists of + machine types. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='MachineTypesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class MachineTypeList(proto.Message): + r"""Contains a list of machine types. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.MachineType]): + A list of MachineType resources. + kind (str): + [Output Only] Type of resource. Always + compute#machineTypeList for lists of machine types. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='MachineType', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class MachineTypesScopedList(proto.Message): + r""" + + Attributes: + machine_types (Sequence[google.cloud.compute_v1.types.MachineType]): + [Output Only] A list of machine types contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] An informational warning that appears when the + machine types list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + machine_types = proto.RepeatedField( + proto.MESSAGE, + number=79720065, + message='MachineType', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ManagedInstance(proto.Message): + r"""A Managed Instance resource. 
+ + Attributes: + current_action (str): + [Output Only] The current action that the managed instance + group has scheduled for the instance. Possible values: - + NONE The instance is running, and the managed instance group + does not have any scheduled actions for this instance. - + CREATING The managed instance group is creating this + instance. If the group fails to create this instance, it + will try again until it is successful. - + CREATING_WITHOUT_RETRIES The managed instance group is + attempting to create this instance only once. If the group + fails to create this instance, it does not try again and the + group's targetSize value is decreased instead. - RECREATING + The managed instance group is recreating this instance. - + DELETING The managed instance group is permanently deleting + this instance. - ABANDONING The managed instance group is + abandoning this instance. The instance will be removed from + the instance group and from any target pools that are + associated with this group. - RESTARTING The managed + instance group is restarting the instance. - REFRESHING The + managed instance group is applying configuration changes to + the instance without stopping it. For example, the group can + update the target pool list for an instance without stopping + that instance. - VERIFYING The managed instance group has + created the instance and it is in the process of being + verified. Check the CurrentAction enum for the list of + possible values. + + This field is a member of `oneof`_ ``_current_action``. + id (int): + [Output only] The unique identifier for this resource. This + field is empty when instance does not exist. + + This field is a member of `oneof`_ ``_id``. + instance (str): + [Output Only] The URL of the instance. The URL can exist + even if the instance has not yet been created. + + This field is a member of `oneof`_ ``_instance``. 
+ instance_health (Sequence[google.cloud.compute_v1.types.ManagedInstanceInstanceHealth]): + [Output Only] Health state of the instance per health-check. + instance_status (str): + [Output Only] The status of the instance. This field is + empty when the instance does not exist. Check the + InstanceStatus enum for the list of possible values. + + This field is a member of `oneof`_ ``_instance_status``. + last_attempt (google.cloud.compute_v1.types.ManagedInstanceLastAttempt): + [Output Only] Information about the last attempt to create + or delete the instance. + + This field is a member of `oneof`_ ``_last_attempt``. + preserved_state_from_config (google.cloud.compute_v1.types.PreservedState): + [Output Only] Preserved state applied from per-instance + config for this instance. + + This field is a member of `oneof`_ ``_preserved_state_from_config``. + preserved_state_from_policy (google.cloud.compute_v1.types.PreservedState): + [Output Only] Preserved state generated based on stateful + policy for this instance. + + This field is a member of `oneof`_ ``_preserved_state_from_policy``. + version (google.cloud.compute_v1.types.ManagedInstanceVersion): + [Output Only] Intended version of this instance. + + This field is a member of `oneof`_ ``_version``. + """ + class CurrentAction(proto.Enum): + r"""[Output Only] The current action that the managed instance group has + scheduled for the instance. Possible values: - NONE The instance is + running, and the managed instance group does not have any scheduled + actions for this instance. - CREATING The managed instance group is + creating this instance. If the group fails to create this instance, + it will try again until it is successful. - CREATING_WITHOUT_RETRIES + The managed instance group is attempting to create this instance + only once. If the group fails to create this instance, it does not + try again and the group's targetSize value is decreased instead. 
- + RECREATING The managed instance group is recreating this instance. - + DELETING The managed instance group is permanently deleting this + instance. - ABANDONING The managed instance group is abandoning this + instance. The instance will be removed from the instance group and + from any target pools that are associated with this group. - + RESTARTING The managed instance group is restarting the instance. - + REFRESHING The managed instance group is applying configuration + changes to the instance without stopping it. For example, the group + can update the target pool list for an instance without stopping + that instance. - VERIFYING The managed instance group has created + the instance and it is in the process of being verified. + """ + UNDEFINED_CURRENT_ACTION = 0 + ABANDONING = 388244813 + CREATING = 455564985 + CREATING_WITHOUT_RETRIES = 428843785 + DELETING = 528602024 + NONE = 2402104 + RECREATING = 287278572 + REFRESHING = 163266343 + RESTARTING = 320534387 + VERIFYING = 16982185 + + class InstanceStatus(proto.Enum): + r"""[Output Only] The status of the instance. This field is empty when + the instance does not exist. 
+ """ + UNDEFINED_INSTANCE_STATUS = 0 + DEPROVISIONING = 428935662 + PROVISIONING = 290896621 + REPAIRING = 413483285 + RUNNING = 121282975 + STAGING = 431072283 + STOPPED = 444276141 + STOPPING = 350791796 + SUSPENDED = 51223995 + SUSPENDING = 514206246 + TERMINATED = 250018339 + + current_action = proto.Field( + proto.STRING, + number=178475964, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + optional=True, + ) + instance_health = proto.RepeatedField( + proto.MESSAGE, + number=382667078, + message='ManagedInstanceInstanceHealth', + ) + instance_status = proto.Field( + proto.STRING, + number=174577372, + optional=True, + ) + last_attempt = proto.Field( + proto.MESSAGE, + number=434771492, + optional=True, + message='ManagedInstanceLastAttempt', + ) + preserved_state_from_config = proto.Field( + proto.MESSAGE, + number=98661858, + optional=True, + message='PreservedState', + ) + preserved_state_from_policy = proto.Field( + proto.MESSAGE, + number=470783954, + optional=True, + message='PreservedState', + ) + version = proto.Field( + proto.MESSAGE, + number=351608024, + optional=True, + message='ManagedInstanceVersion', + ) + + +class ManagedInstanceInstanceHealth(proto.Message): + r""" + + Attributes: + detailed_health_state (str): + [Output Only] The current detailed instance health state. + Check the DetailedHealthState enum for the list of possible + values. + + This field is a member of `oneof`_ ``_detailed_health_state``. + health_check (str): + [Output Only] The URL for the health check that verifies + whether the instance is healthy. + + This field is a member of `oneof`_ ``_health_check``. 
+ """ + class DetailedHealthState(proto.Enum): + r"""[Output Only] The current detailed instance health state.""" + UNDEFINED_DETAILED_HEALTH_STATE = 0 + DRAINING = 480455402 + HEALTHY = 439801213 + TIMEOUT = 477813057 + UNHEALTHY = 462118084 + UNKNOWN = 433141802 + + detailed_health_state = proto.Field( + proto.STRING, + number=510470173, + optional=True, + ) + health_check = proto.Field( + proto.STRING, + number=308876645, + optional=True, + ) + + +class ManagedInstanceLastAttempt(proto.Message): + r""" + + Attributes: + errors (google.cloud.compute_v1.types.Errors): + [Output Only] Encountered errors during the last attempt to + create or delete the instance. + + This field is a member of `oneof`_ ``_errors``. + """ + + errors = proto.Field( + proto.MESSAGE, + number=315977579, + optional=True, + message='Errors', + ) + + +class ManagedInstanceVersion(proto.Message): + r""" + + Attributes: + instance_template (str): + [Output Only] The intended template of the instance. This + field is empty when current_action is one of { DELETING, + ABANDONING }. + + This field is a member of `oneof`_ ``_instance_template``. + name (str): + [Output Only] Name of the version. + + This field is a member of `oneof`_ ``_name``. + """ + + instance_template = proto.Field( + proto.STRING, + number=309248228, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + + +class Metadata(proto.Message): + r"""A metadata key/value entry. + + Attributes: + fingerprint (str): + Specifies a fingerprint for this request, + which is essentially a hash of the metadata's + contents and used for optimistic locking. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update metadata. You must always provide an + up-to-date fingerprint hash in order to update + or change metadata, otherwise the request will + fail with error 412 conditionNotMet. 
To see the + latest fingerprint, make a get() request to + retrieve the resource. + + This field is a member of `oneof`_ ``_fingerprint``. + items (Sequence[google.cloud.compute_v1.types.Items]): + Array of key/value pairs. The total size of + all keys and values must be less than 512 KB. + kind (str): + [Output Only] Type of the resource. Always compute#metadata + for metadata. + + This field is a member of `oneof`_ ``_kind``. + """ + + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Items', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + + +class MetadataFilter(proto.Message): + r"""Opaque filter criteria used by loadbalancers to restrict routing + configuration to a limited set of loadbalancing proxies. Proxies and + sidecars involved in loadbalancing would typically present metadata + to the loadbalancers which need to match criteria specified here. If + a match takes place, the relevant configuration is made available to + those proxies. For each metadataFilter in this list, if its + filterMatchCriteria is set to MATCH_ANY, at least one of the + filterLabels must match the corresponding label provided in the + metadata. If its filterMatchCriteria is set to MATCH_ALL, then all + of its filterLabels must match with corresponding labels provided in + the metadata. An example for using metadataFilters would be: if + loadbalancing involves Envoys, they will only receive routing + configuration when values in metadataFilters match values supplied + in of their XDS requests to loadbalancers. + + Attributes: + filter_labels (Sequence[google.cloud.compute_v1.types.MetadataFilterLabelMatch]): + The list of label value pairs that must match + labels in the provided metadata based on + filterMatchCriteria This list must not be empty + and can have at the most 64 entries. 
+ filter_match_criteria (str): + Specifies how individual filterLabel matches within the list + of filterLabels contribute towards the overall + metadataFilter match. Supported values are: - MATCH_ANY: At + least one of the filterLabels must have a matching label in + the provided metadata. - MATCH_ALL: All filterLabels must + have matching labels in the provided metadata. Check the + FilterMatchCriteria enum for the list of possible values. + + This field is a member of `oneof`_ ``_filter_match_criteria``. + """ + class FilterMatchCriteria(proto.Enum): + r"""Specifies how individual filterLabel matches within the list of + filterLabels contribute towards the overall metadataFilter match. + Supported values are: - MATCH_ANY: At least one of the filterLabels + must have a matching label in the provided metadata. - MATCH_ALL: + All filterLabels must have matching labels in the provided metadata. + """ + UNDEFINED_FILTER_MATCH_CRITERIA = 0 + MATCH_ALL = 180663271 + MATCH_ANY = 180663346 + NOT_SET = 163646646 + + filter_labels = proto.RepeatedField( + proto.MESSAGE, + number=307903142, + message='MetadataFilterLabelMatch', + ) + filter_match_criteria = proto.Field( + proto.STRING, + number=239970368, + optional=True, + ) + + +class MetadataFilterLabelMatch(proto.Message): + r"""MetadataFilter label name value pairs that are expected to + match corresponding labels presented as metadata to the + loadbalancer. + + Attributes: + name (str): + Name of metadata label. The name can have a + maximum length of 1024 characters and must be at + least 1 character long. + + This field is a member of `oneof`_ ``_name``. + value (str): + The value of the label must match the + specified value. value can have a maximum length + of 1024 characters. + + This field is a member of `oneof`_ ``_value``. 
+ """ + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + value = proto.Field( + proto.STRING, + number=111972721, + optional=True, + ) + + +class MoveDiskProjectRequest(proto.Message): + r"""A request message for Projects.MoveDisk. See the method + description for details. + + Attributes: + disk_move_request_resource (google.cloud.compute_v1.types.DiskMoveRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + disk_move_request_resource = proto.Field( + proto.MESSAGE, + number=313008458, + message='DiskMoveRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class MoveFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.Move. See the method + description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + parent_id (str): + The new parent of the firewall policy. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + parent_id = proto.Field( + proto.STRING, + number=459714768, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class MoveInstanceProjectRequest(proto.Message): + r"""A request message for Projects.MoveInstance. See the method + description for details. + + Attributes: + instance_move_request_resource (google.cloud.compute_v1.types.InstanceMoveRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + """ + + instance_move_request_resource = proto.Field( + proto.MESSAGE, + number=311664194, + message='InstanceMoveRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class NamedPort(proto.Message): + r"""The named port. For example: <"http", 80>. + + Attributes: + name (str): + The name for this named port. The name must + be 1-63 characters long, and comply with + RFC1035. + + This field is a member of `oneof`_ ``_name``. + port (int): + The port number, which can be a value between + 1 and 65535. + + This field is a member of `oneof`_ ``_port``. + """ + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + + +class Network(proto.Message): + r"""Represents a VPC Network resource. Networks connect resources + to each other and to the internet. For more information, read + Virtual Private Cloud (VPC) Network. + + Attributes: + I_pv4_range (str): + Deprecated in favor of subnet mode networks. + The range of internal addresses that are legal + on this network. This range is a CIDR + specification, for example: 192.168.0.0/16. + Provided by the client when the network is + created. + + This field is a member of `oneof`_ ``_I_pv4_range``. + auto_create_subnetworks (bool): + Must be set to create a VPC network. If not + set, a legacy network is created. When set to + true, the VPC network is created in auto mode. + When set to false, the VPC network is created in + custom mode. An auto mode VPC network starts + with one subnet per region. Each subnet has a + predetermined range as described in Auto mode + VPC network IP ranges. For custom mode VPC + networks, you can add subnets using the + subnetworks insert method. + + This field is a member of `oneof`_ ``_auto_create_subnetworks``. 
+ creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this field when you create the resource. + + This field is a member of `oneof`_ ``_description``. + gateway_i_pv4 (str): + [Output Only] The gateway address for default routing out of + the network, selected by GCP. + + This field is a member of `oneof`_ ``_gateway_i_pv4``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#network + for networks. + + This field is a member of `oneof`_ ``_kind``. + mtu (int): + Maximum Transmission Unit in bytes. The + minimum value for this field is 1460 and the + maximum value is 1500 bytes. If unspecified, + defaults to 1460. + + This field is a member of `oneof`_ ``_mtu``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?``. The first character must be + a lowercase letter, and all following characters (except for + the last character) must be a dash, lowercase letter, or + digit. The last character must be a lowercase letter or + digit. + + This field is a member of `oneof`_ ``_name``. + peerings (Sequence[google.cloud.compute_v1.types.NetworkPeering]): + [Output Only] A list of network peerings for the resource. + routing_config (google.cloud.compute_v1.types.NetworkRoutingConfig): + The network-level routing configuration for + this network. Used by Cloud Router to determine + what type of network-wide routing behavior to + enforce. + + This field is a member of `oneof`_ ``_routing_config``. 
+ self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + subnetworks (Sequence[str]): + [Output Only] Server-defined fully-qualified URLs for all + subnetworks in this VPC network. + """ + + I_pv4_range = proto.Field( + proto.STRING, + number=59234358, + optional=True, + ) + auto_create_subnetworks = proto.Field( + proto.BOOL, + number=256156690, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + gateway_i_pv4 = proto.Field( + proto.STRING, + number=178678877, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + mtu = proto.Field( + proto.INT32, + number=108462, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + peerings = proto.RepeatedField( + proto.MESSAGE, + number=69883187, + message='NetworkPeering', + ) + routing_config = proto.Field( + proto.MESSAGE, + number=523556059, + optional=True, + message='NetworkRoutingConfig', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + subnetworks = proto.RepeatedField( + proto.STRING, + number=415853125, + ) + + +class NetworkEndpoint(proto.Message): + r"""The network endpoint. + + Attributes: + annotations (Sequence[google.cloud.compute_v1.types.NetworkEndpoint.AnnotationsEntry]): + Metadata defined as annotations on the + network endpoint. + fqdn (str): + Optional fully qualified domain name of network endpoint. + This can only be specified when + NetworkEndpointGroup.network_endpoint_type is + NON_GCP_FQDN_PORT. + + This field is a member of `oneof`_ ``_fqdn``. + instance (str): + The name for a specific VM instance that the IP address + belongs to. 
This is required for network endpoints of type + GCE_VM_IP_PORT. The instance must be in the same zone of + network endpoint group. The name must be 1-63 characters + long, and comply with RFC1035. + + This field is a member of `oneof`_ ``_instance``. + ip_address (str): + Optional IPv4 address of network endpoint. + The IP address must belong to a VM in Compute + Engine (either the primary IP or as part of an + aliased IP range). If the IP address is not + specified, then the primary IP address for the + VM instance in the network that the network + endpoint group belongs to will be used. + + This field is a member of `oneof`_ ``_ip_address``. + port (int): + Optional port number of network endpoint. If + not specified, the defaultPort for the network + endpoint group will be used. + + This field is a member of `oneof`_ ``_port``. + """ + + annotations = proto.MapField( + proto.STRING, + proto.STRING, + number=112032548, + ) + fqdn = proto.Field( + proto.STRING, + number=3150485, + optional=True, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + optional=True, + ) + ip_address = proto.Field( + proto.STRING, + number=406272220, + optional=True, + ) + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + + +class NetworkEndpointGroup(proto.Message): + r"""Represents a collection of network endpoints. A network + endpoint group (NEG) defines how a set of endpoints should be + reached, whether they are reachable, and where they are located. + For more information about using NEGs, see Setting up external + HTTP(S) Load Balancing with internet NEGs, Setting up zonal + NEGs, or Setting up external HTTP(S) Load Balancing with + serverless NEGs. + + Attributes: + annotations (Sequence[google.cloud.compute_v1.types.NetworkEndpointGroup.AnnotationsEntry]): + Metadata defined as annotations on the + network endpoint group. 
+ app_engine (google.cloud.compute_v1.types.NetworkEndpointGroupAppEngine): + Only valid when networkEndpointType is + "SERVERLESS". Only one of cloudRun, appEngine or + cloudFunction may be set. + + This field is a member of `oneof`_ ``_app_engine``. + cloud_function (google.cloud.compute_v1.types.NetworkEndpointGroupCloudFunction): + Only valid when networkEndpointType is + "SERVERLESS". Only one of cloudRun, appEngine or + cloudFunction may be set. + + This field is a member of `oneof`_ ``_cloud_function``. + cloud_run (google.cloud.compute_v1.types.NetworkEndpointGroupCloudRun): + Only valid when networkEndpointType is + "SERVERLESS". Only one of cloudRun, appEngine or + cloudFunction may be set. + + This field is a member of `oneof`_ ``_cloud_run``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + default_port (int): + The default port used if the port number is + not specified in the network endpoint. + + This field is a member of `oneof`_ ``_default_port``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#networkEndpointGroup for network endpoint group. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource; provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. 
Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network (str): + The URL of the network to which all network + endpoints in the NEG belong. Uses "default" + project network if unspecified. + + This field is a member of `oneof`_ ``_network``. + network_endpoint_type (str): + Type of network endpoints in this network endpoint group. + Can be one of GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, + INTERNET_FQDN_PORT, INTERNET_IP_PORT, SERVERLESS, + PRIVATE_SERVICE_CONNECT. Check the NetworkEndpointType enum + for the list of possible values. + + This field is a member of `oneof`_ ``_network_endpoint_type``. + region (str): + [Output Only] The URL of the region where the network + endpoint group is located. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + size (int): + [Output only] Number of network endpoints in the network + endpoint group. + + This field is a member of `oneof`_ ``_size``. + subnetwork (str): + Optional URL of the subnetwork to which all + network endpoints in the NEG belong. + + This field is a member of `oneof`_ ``_subnetwork``. + zone (str): + [Output Only] The URL of the zone where the network endpoint + group is located. + + This field is a member of `oneof`_ ``_zone``. + """ + class NetworkEndpointType(proto.Enum): + r"""Type of network endpoints in this network endpoint group. Can be one + of GCE_VM_IP_PORT, NON_GCP_PRIVATE_IP_PORT, INTERNET_FQDN_PORT, + INTERNET_IP_PORT, SERVERLESS, PRIVATE_SERVICE_CONNECT. 
+ """ + UNDEFINED_NETWORK_ENDPOINT_TYPE = 0 + GCE_VM_IP = 401880793 + GCE_VM_IP_PORT = 501838375 + INTERNET_FQDN_PORT = 404154477 + INTERNET_IP_PORT = 477719963 + NON_GCP_PRIVATE_IP_PORT = 336447968 + SERVERLESS = 270492508 + + annotations = proto.MapField( + proto.STRING, + proto.STRING, + number=112032548, + ) + app_engine = proto.Field( + proto.MESSAGE, + number=340788768, + optional=True, + message='NetworkEndpointGroupAppEngine', + ) + cloud_function = proto.Field( + proto.MESSAGE, + number=519893666, + optional=True, + message='NetworkEndpointGroupCloudFunction', + ) + cloud_run = proto.Field( + proto.MESSAGE, + number=111060353, + optional=True, + message='NetworkEndpointGroupCloudRun', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + default_port = proto.Field( + proto.INT32, + number=423377855, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + network_endpoint_type = proto.Field( + proto.STRING, + number=118301523, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + size = proto.Field( + proto.INT32, + number=3530753, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class NetworkEndpointGroupAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.NetworkEndpointGroupAggregatedList.ItemsEntry]): + A list of NetworkEndpointGroupsScopedList + resources. + kind (str): + [Output Only] The resource type, which is always + compute#networkEndpointGroupAggregatedList for aggregated + lists of network endpoint groups. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='NetworkEndpointGroupsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NetworkEndpointGroupAppEngine(proto.Message): + r"""Configuration for an App Engine network endpoint group (NEG). 
+ The service is optional, may be provided explicitly or in the + URL mask. The version is optional and can only be provided + explicitly or in the URL mask when service is present. Note: App + Engine service must be in the same project and located in the + same region as the Serverless NEG. + + Attributes: + service (str): + Optional serving service. The service name is + case-sensitive and must be 1-63 characters long. + Example value: "default", "my-service". + + This field is a member of `oneof`_ ``_service``. + url_mask (str): + A template to parse service and version + fields from a request URL. URL mask allows for + routing to multiple App Engine services without + having to create multiple Network Endpoint + Groups and backend services. For example, the + request URLs "foo1-dot-appname.appspot.com/v1" + and "foo1-dot-appname.appspot.com/v2" can be + backed by the same Serverless NEG with URL mask + "-dot-appname.appspot.com/". The URL mask will + parse them to { service = "foo1", version = "v1" + } and { service = "foo1", version = "v2" } + respectively. + + This field is a member of `oneof`_ ``_url_mask``. + version (str): + Optional serving version. The version name is + case-sensitive and must be 1-100 characters + long. Example value: "v1", "v2". + + This field is a member of `oneof`_ ``_version``. + """ + + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + url_mask = proto.Field( + proto.STRING, + number=103352252, + optional=True, + ) + version = proto.Field( + proto.STRING, + number=351608024, + optional=True, + ) + + +class NetworkEndpointGroupCloudFunction(proto.Message): + r"""Configuration for a Cloud Function network endpoint group + (NEG). The function must be provided explicitly or in the URL + mask. Note: Cloud Function must be in the same project and + located in the same region as the Serverless NEG. + + Attributes: + function (str): + A user-defined name of the Cloud Function. 
+            The function name is case-sensitive and must be
+            1-63 characters long. Example value: "func1".
+
+            This field is a member of `oneof`_ ``_function``.
+        url_mask (str):
+            A template to parse function field from a
+            request URL. URL mask allows for routing to
+            multiple Cloud Functions without having to
+            create multiple Network Endpoint Groups and
+            backend services. For example, request URLs "
+            mydomain.com/function1" and
+            "mydomain.com/function2" can be backed by the
+            same Serverless NEG with URL mask "/". The URL
+            mask will parse them to { function = "function1"
+            } and { function = "function2" } respectively.
+
+            This field is a member of `oneof`_ ``_url_mask``.
+    """
+
+    function = proto.Field(
+        proto.STRING,
+        number=307196888,
+        optional=True,
+    )
+    url_mask = proto.Field(
+        proto.STRING,
+        number=103352252,
+        optional=True,
+    )
+
+
+class NetworkEndpointGroupCloudRun(proto.Message):
+    r"""Configuration for a Cloud Run network endpoint group (NEG).
+    The service must be provided explicitly or in the URL mask. The
+    tag is optional, may be provided explicitly or in the URL mask.
+    Note: Cloud Run service must be in the same project and located
+    in the same region as the Serverless NEG.
+
+    Attributes:
+        service (str):
+            Cloud Run service is the main resource of
+            Cloud Run. The service must be 1-63 characters
+            long, and comply with RFC1035. Example value:
+            "run-service".
+
+            This field is a member of `oneof`_ ``_service``.
+        tag (str):
+            Optional Cloud Run tag represents the "named-
+            revision" to provide additional fine-grained
+            traffic routing information. The tag must be
+            1-63 characters long, and comply with RFC1035.
+            Example value: "revision-0010".
+
+            This field is a member of `oneof`_ ``_tag``.
+        url_mask (str):
+            A template to parse service and tag fields
+            from a request URL. URL mask allows for routing
+            to multiple Run services without having to
+            create multiple network endpoint groups and
+            backend services.
For example, request URLs
+ "foo1.domain.com/bar1" and
+ "foo1.domain.com/bar2" can be backed by the same
+ Serverless Network Endpoint Group (NEG) with URL
+ mask "<tag>.domain.com/<service>". The URL mask will parse
+ them to { service="bar1", tag="foo1" } and {
+ service="bar2", tag="foo1" } respectively.
+
+ This field is a member of `oneof`_ ``_url_mask``.
+ """
+
+ service = proto.Field(
+ proto.STRING,
+ number=373540533,
+ optional=True,
+ )
+ tag = proto.Field(
+ proto.STRING,
+ number=114586,
+ optional=True,
+ )
+ url_mask = proto.Field(
+ proto.STRING,
+ number=103352252,
+ optional=True,
+ )
+
+
+class NetworkEndpointGroupList(proto.Message):
+ r"""
+
+ Attributes:
+ id (str):
+ [Output Only] Unique identifier for the resource; defined by
+ the server.
+
+ This field is a member of `oneof`_ ``_id``.
+ items (Sequence[google.cloud.compute_v1.types.NetworkEndpointGroup]):
+ A list of NetworkEndpointGroup resources.
+ kind (str):
+ [Output Only] The resource type, which is always
+ compute#networkEndpointGroupList for network endpoint group
+ lists.
+
+ This field is a member of `oneof`_ ``_kind``.
+ next_page_token (str):
+ [Output Only] This token allows you to get the next page of
+ results for list requests. If the number of results is
+ larger than maxResults, use the nextPageToken as a value for
+ the query parameter pageToken in the next list request.
+ Subsequent list requests will have their own nextPageToken
+ to continue paging through the results.
+
+ This field is a member of `oneof`_ ``_next_page_token``.
+ self_link (str):
+ [Output Only] Server-defined URL for this resource.
+
+ This field is a member of `oneof`_ ``_self_link``.
+ warning (google.cloud.compute_v1.types.Warning):
+ [Output Only] Informational warning message.
+
+ This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='NetworkEndpointGroup', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NetworkEndpointGroupsAttachEndpointsRequest(proto.Message): + r""" + + Attributes: + network_endpoints (Sequence[google.cloud.compute_v1.types.NetworkEndpoint]): + The list of network endpoints to be attached. + """ + + network_endpoints = proto.RepeatedField( + proto.MESSAGE, + number=149850285, + message='NetworkEndpoint', + ) + + +class NetworkEndpointGroupsDetachEndpointsRequest(proto.Message): + r""" + + Attributes: + network_endpoints (Sequence[google.cloud.compute_v1.types.NetworkEndpoint]): + The list of network endpoints to be detached. + """ + + network_endpoints = proto.RepeatedField( + proto.MESSAGE, + number=149850285, + message='NetworkEndpoint', + ) + + +class NetworkEndpointGroupsListEndpointsRequest(proto.Message): + r""" + + Attributes: + health_status (str): + Optional query parameter for showing the + health status of each network endpoint. Valid + options are SKIP or SHOW. If you don't specify + this parameter, the health status of network + endpoints will not be provided. Check the + HealthStatus enum for the list of possible + values. + + This field is a member of `oneof`_ ``_health_status``. + """ + class HealthStatus(proto.Enum): + r"""Optional query parameter for showing the health status of + each network endpoint. Valid options are SKIP or SHOW. If you + don't specify this parameter, the health status of network + endpoints will not be provided. 
+ """ + UNDEFINED_HEALTH_STATUS = 0 + SHOW = 2544381 + SKIP = 2547071 + + health_status = proto.Field( + proto.STRING, + number=380545845, + optional=True, + ) + + +class NetworkEndpointGroupsListNetworkEndpoints(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NetworkEndpointWithHealthStatus]): + A list of NetworkEndpointWithHealthStatus + resources. + kind (str): + [Output Only] The resource type, which is always + compute#networkEndpointGroupsListNetworkEndpoints for the + list of network endpoints in the specified network endpoint + group. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='NetworkEndpointWithHealthStatus', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NetworkEndpointGroupsScopedList(proto.Message): + r""" + + Attributes: + network_endpoint_groups (Sequence[google.cloud.compute_v1.types.NetworkEndpointGroup]): + [Output Only] The list of network endpoint groups that are + contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] An informational warning that replaces the + list of network endpoint groups when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + network_endpoint_groups = proto.RepeatedField( + proto.MESSAGE, + number=29346733, + message='NetworkEndpointGroup', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NetworkEndpointWithHealthStatus(proto.Message): + r""" + + Attributes: + healths (Sequence[google.cloud.compute_v1.types.HealthStatusForNetworkEndpoint]): + [Output only] The health status of network endpoint; + network_endpoint (google.cloud.compute_v1.types.NetworkEndpoint): + [Output only] The network endpoint; + + This field is a member of `oneof`_ ``_network_endpoint``. + """ + + healths = proto.RepeatedField( + proto.MESSAGE, + number=258689431, + message='HealthStatusForNetworkEndpoint', + ) + network_endpoint = proto.Field( + proto.MESSAGE, + number=56789126, + optional=True, + message='NetworkEndpoint', + ) + + +class NetworkInterface(proto.Message): + r"""A network interface resource attached to an instance. 
+ + Attributes: + access_configs (Sequence[google.cloud.compute_v1.types.AccessConfig]): + An array of configurations for this interface. Currently, + only one access config, ONE_TO_ONE_NAT, is supported. If + there are no accessConfigs specified, then this instance + will have no external internet access. + alias_ip_ranges (Sequence[google.cloud.compute_v1.types.AliasIpRange]): + An array of alias IP ranges for this network + interface. You can only specify this field for + network interfaces in VPC networks. + fingerprint (str): + Fingerprint hash of contents stored in this + network interface. This field will be ignored + when inserting an Instance or adding a + NetworkInterface. An up-to-date fingerprint must + be provided in order to update the + NetworkInterface. The request will fail with + error 400 Bad Request if the fingerprint is not + provided, or 412 Precondition Failed if the + fingerprint is out of date. + + This field is a member of `oneof`_ ``_fingerprint``. + ipv6_access_configs (Sequence[google.cloud.compute_v1.types.AccessConfig]): + An array of IPv6 access configurations for this interface. + Currently, only one IPv6 access config, DIRECT_IPV6, is + supported. If there is no ipv6AccessConfig specified, then + this instance will have no external IPv6 Internet access. + ipv6_access_type (str): + [Output Only] One of EXTERNAL, INTERNAL to indicate whether + the IP can be accessed from the Internet. This field is + always inherited from its subnetwork. Valid only if + stackType is IPV4_IPV6. Check the Ipv6AccessType enum for + the list of possible values. + + This field is a member of `oneof`_ ``_ipv6_access_type``. + ipv6_address (str): + [Output Only] An IPv6 internal network address for this + network interface. + + This field is a member of `oneof`_ ``_ipv6_address``. + kind (str): + [Output Only] Type of the resource. Always + compute#networkInterface for network interfaces. + + This field is a member of `oneof`_ ``_kind``. 
+ name (str): + [Output Only] The name of the network interface, which is + generated by the server. For network devices, these are + eth0, eth1, etc. + + This field is a member of `oneof`_ ``_name``. + network (str): + URL of the network resource for this + instance. When creating an instance, if neither + the network nor the subnetwork is specified, the + default network global/networks/default is used; + if the network is not specified but the + subnetwork is specified, the network is + inferred. If you specify this property, you can + specify the network as a full or partial URL. + For example, the following are all valid URLs: - + https://www.googleapis.com/compute/v1/projects/project/global/networks/ + network - + projects/project/global/networks/network - + global/networks/default + + This field is a member of `oneof`_ ``_network``. + network_i_p (str): + An IPv4 internal IP address to assign to the + instance for this network interface. If not + specified by the user, an unused internal IP is + assigned by the system. + + This field is a member of `oneof`_ ``_network_i_p``. + nic_type (str): + The type of vNIC to be used on this + interface. This may be gVNIC or VirtioNet. Check + the NicType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_nic_type``. + queue_count (int): + The networking queue count that's specified + by users for the network interface. Both Rx and + Tx queues will be set to this number. It'll be + empty if not specified by the users. + + This field is a member of `oneof`_ ``_queue_count``. + stack_type (str): + The stack type for this network interface to identify + whether the IPv6 feature is enabled or not. If not + specified, IPV4_ONLY will be used. This field can be both + set at instance creation and update network interface + operations. Check the StackType enum for the list of + possible values. + + This field is a member of `oneof`_ ``_stack_type``. 
+ subnetwork (str): + The URL of the Subnetwork resource for this + instance. If the network resource is in legacy + mode, do not specify this field. If the network + is in auto subnet mode, specifying the + subnetwork is optional. If the network is in + custom subnet mode, specifying the subnetwork is + required. If you specify this field, you can + specify the subnetwork as a full or partial URL. + For example, the following are all valid URLs: - + https://www.googleapis.com/compute/v1/projects/project/regions/region + /subnetworks/subnetwork - + regions/region/subnetworks/subnetwork + + This field is a member of `oneof`_ ``_subnetwork``. + """ + class Ipv6AccessType(proto.Enum): + r"""[Output Only] One of EXTERNAL, INTERNAL to indicate whether the IP + can be accessed from the Internet. This field is always inherited + from its subnetwork. Valid only if stackType is IPV4_IPV6. + """ + UNDEFINED_IPV6_ACCESS_TYPE = 0 + EXTERNAL = 35607499 + UNSPECIFIED_IPV6_ACCESS_TYPE = 313080613 + + class NicType(proto.Enum): + r"""The type of vNIC to be used on this interface. This may be + gVNIC or VirtioNet. + """ + UNDEFINED_NIC_TYPE = 0 + GVNIC = 68209305 + UNSPECIFIED_NIC_TYPE = 67411801 + VIRTIO_NET = 452123481 + + class StackType(proto.Enum): + r"""The stack type for this network interface to identify whether the + IPv6 feature is enabled or not. If not specified, IPV4_ONLY will be + used. This field can be both set at instance creation and update + network interface operations. 
+ """ + UNDEFINED_STACK_TYPE = 0 + IPV4_IPV6 = 22197249 + IPV4_ONLY = 22373798 + UNSPECIFIED_STACK_TYPE = 298084569 + + access_configs = proto.RepeatedField( + proto.MESSAGE, + number=111058326, + message='AccessConfig', + ) + alias_ip_ranges = proto.RepeatedField( + proto.MESSAGE, + number=165085631, + message='AliasIpRange', + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + ipv6_access_configs = proto.RepeatedField( + proto.MESSAGE, + number=483472110, + message='AccessConfig', + ) + ipv6_access_type = proto.Field( + proto.STRING, + number=504658653, + optional=True, + ) + ipv6_address = proto.Field( + proto.STRING, + number=341563804, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + network_i_p = proto.Field( + proto.STRING, + number=207181961, + optional=True, + ) + nic_type = proto.Field( + proto.STRING, + number=59810577, + optional=True, + ) + queue_count = proto.Field( + proto.INT32, + number=503708769, + optional=True, + ) + stack_type = proto.Field( + proto.STRING, + number=425908881, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + optional=True, + ) + + +class NetworkList(proto.Message): + r"""Contains a list of networks. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Network]): + A list of Network resources. + kind (str): + [Output Only] Type of resource. Always compute#networkList + for lists of networks. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. 
If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Network', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NetworkPeering(proto.Message): + r"""A network peering attached to a network resource. The message + includes the peering name, peer network, peering state, and a + flag indicating whether Google Compute Engine should + automatically create routes for the peering. + + Attributes: + auto_create_routes (bool): + This field will be deprecated soon. Use the + exchange_subnet_routes field instead. Indicates whether full + mesh connectivity is created and managed automatically + between peered networks. Currently this field should always + be true since Google Compute Engine will automatically + create and manage subnetwork routes between two networks + when peering state is ACTIVE. + + This field is a member of `oneof`_ ``_auto_create_routes``. 
+ exchange_subnet_routes (bool): + Indicates whether full mesh connectivity is + created and managed automatically between peered + networks. Currently this field should always be + true since Google Compute Engine will + automatically create and manage subnetwork + routes between two networks when peering state + is ACTIVE. + + This field is a member of `oneof`_ ``_exchange_subnet_routes``. + export_custom_routes (bool): + Whether to export the custom routes to peer + network. + + This field is a member of `oneof`_ ``_export_custom_routes``. + export_subnet_routes_with_public_ip (bool): + Whether subnet routes with public IP range + are exported. The default value is true, all + subnet routes are exported. IPv4 special-use + ranges are always exported to peers and are not + controlled by this field. + + This field is a member of `oneof`_ ``_export_subnet_routes_with_public_ip``. + import_custom_routes (bool): + Whether to import the custom routes from peer + network. + + This field is a member of `oneof`_ ``_import_custom_routes``. + import_subnet_routes_with_public_ip (bool): + Whether subnet routes with public IP range + are imported. The default value is false. IPv4 + special-use ranges are always imported from + peers and are not controlled by this field. + + This field is a member of `oneof`_ ``_import_subnet_routes_with_public_ip``. + name (str): + Name of this peering. Provided by the client when the + peering is created. The name must comply with RFC1035. + Specifically, the name must be 1-63 characters long and + match regular expression ``[a-z]([-a-z0-9]*[a-z0-9])?``. The + first character must be a lowercase letter, and all the + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network (str): + The URL of the peer network. It can be either + full URL or partial URL. The peer network may + belong to a different project. 
If the partial + URL does not contain project, it is assumed that + the peer network is in the same project as the + current network. + + This field is a member of `oneof`_ ``_network``. + peer_mtu (int): + Maximum Transmission Unit in bytes. + + This field is a member of `oneof`_ ``_peer_mtu``. + state (str): + [Output Only] State for the peering, either ``ACTIVE`` or + ``INACTIVE``. The peering is ``ACTIVE`` when there's a + matching configuration in the peer network. Check the State + enum for the list of possible values. + + This field is a member of `oneof`_ ``_state``. + state_details (str): + [Output Only] Details about the current state of the + peering. + + This field is a member of `oneof`_ ``_state_details``. + """ + class State(proto.Enum): + r"""[Output Only] State for the peering, either ``ACTIVE`` or + ``INACTIVE``. The peering is ``ACTIVE`` when there's a matching + configuration in the peer network. + """ + UNDEFINED_STATE = 0 + ACTIVE = 314733318 + INACTIVE = 270421099 + + auto_create_routes = proto.Field( + proto.BOOL, + number=57454941, + optional=True, + ) + exchange_subnet_routes = proto.Field( + proto.BOOL, + number=26322256, + optional=True, + ) + export_custom_routes = proto.Field( + proto.BOOL, + number=60281485, + optional=True, + ) + export_subnet_routes_with_public_ip = proto.Field( + proto.BOOL, + number=97940834, + optional=True, + ) + import_custom_routes = proto.Field( + proto.BOOL, + number=197982398, + optional=True, + ) + import_subnet_routes_with_public_ip = proto.Field( + proto.BOOL, + number=14419729, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + peer_mtu = proto.Field( + proto.INT32, + number=69584721, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + state_details = proto.Field( + proto.STRING, + number=95566996, + optional=True, + ) + + 
+class NetworkRoutingConfig(proto.Message): + r"""A routing configuration attached to a network resource. The + message includes the list of routers associated with the + network, and a flag indicating the type of routing behavior to + enforce network-wide. + + Attributes: + routing_mode (str): + The network-wide routing mode to use. If set + to REGIONAL, this network's Cloud Routers will + only advertise routes with subnets of this + network in the same region as the router. If set + to GLOBAL, this network's Cloud Routers will + advertise routes with all subnets of this + network, across regions. Check the RoutingMode + enum for the list of possible values. + + This field is a member of `oneof`_ ``_routing_mode``. + """ + class RoutingMode(proto.Enum): + r"""The network-wide routing mode to use. If set to REGIONAL, + this network's Cloud Routers will only advertise routes with + subnets of this network in the same region as the router. If set + to GLOBAL, this network's Cloud Routers will advertise routes + with all subnets of this network, across regions. + """ + UNDEFINED_ROUTING_MODE = 0 + GLOBAL = 494663587 + REGIONAL = 92288543 + + routing_mode = proto.Field( + proto.STRING, + number=475143548, + optional=True, + ) + + +class NetworksAddPeeringRequest(proto.Message): + r""" + + Attributes: + auto_create_routes (bool): + This field will be deprecated soon. Use + exchange_subnet_routes in network_peering instead. Indicates + whether full mesh connectivity is created and managed + automatically between peered networks. Currently this field + should always be true since Google Compute Engine will + automatically create and manage subnetwork routes between + two networks when peering state is ACTIVE. + + This field is a member of `oneof`_ ``_auto_create_routes``. + name (str): + Name of the peering, which should conform to + RFC1035. + + This field is a member of `oneof`_ ``_name``. 
+ network_peering (google.cloud.compute_v1.types.NetworkPeering): + Network peering parameters. In order to specify route + policies for peering using import and export custom routes, + you must specify all peering related parameters (name, peer + network, exchange_subnet_routes) in the network_peering + field. The corresponding fields in NetworksAddPeeringRequest + will be deprecated soon. + + This field is a member of `oneof`_ ``_network_peering``. + peer_network (str): + URL of the peer network. It can be either + full URL or partial URL. The peer network may + belong to a different project. If the partial + URL does not contain project, it is assumed that + the peer network is in the same project as the + current network. + + This field is a member of `oneof`_ ``_peer_network``. + """ + + auto_create_routes = proto.Field( + proto.BOOL, + number=57454941, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network_peering = proto.Field( + proto.MESSAGE, + number=328926767, + optional=True, + message='NetworkPeering', + ) + peer_network = proto.Field( + proto.STRING, + number=500625489, + optional=True, + ) + + +class NetworksGetEffectiveFirewallsResponse(proto.Message): + r""" + + Attributes: + firewall_policys (Sequence[google.cloud.compute_v1.types.NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy]): + Effective firewalls from firewall policy. + firewalls (Sequence[google.cloud.compute_v1.types.Firewall]): + Effective firewalls on the network. + """ + + firewall_policys = proto.RepeatedField( + proto.MESSAGE, + number=410985794, + message='NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy', + ) + firewalls = proto.RepeatedField( + proto.MESSAGE, + number=272245619, + message='Firewall', + ) + + +class NetworksGetEffectiveFirewallsResponseEffectiveFirewallPolicy(proto.Message): + r""" + + Attributes: + display_name (str): + [Output Only] Deprecated, please use short name instead. 
The + display name of the firewall policy. + + This field is a member of `oneof`_ ``_display_name``. + name (str): + [Output Only] The name of the firewall policy. + + This field is a member of `oneof`_ ``_name``. + rules (Sequence[google.cloud.compute_v1.types.FirewallPolicyRule]): + The rules that apply to the network. + short_name (str): + [Output Only] The short name of the firewall policy. + + This field is a member of `oneof`_ ``_short_name``. + type_ (str): + [Output Only] The type of the firewall policy. Check the + Type enum for the list of possible values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""[Output Only] The type of the firewall policy.""" + UNDEFINED_TYPE = 0 + HIERARCHY = 69902869 + NETWORK = 413984270 + UNSPECIFIED = 526786327 + + display_name = proto.Field( + proto.STRING, + number=4473832, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + rules = proto.RepeatedField( + proto.MESSAGE, + number=108873975, + message='FirewallPolicyRule', + ) + short_name = proto.Field( + proto.STRING, + number=492051566, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class NetworksRemovePeeringRequest(proto.Message): + r""" + + Attributes: + name (str): + Name of the peering, which should conform to + RFC1035. + + This field is a member of `oneof`_ ``_name``. + """ + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + + +class NetworksUpdatePeeringRequest(proto.Message): + r""" + + Attributes: + network_peering (google.cloud.compute_v1.types.NetworkPeering): + + This field is a member of `oneof`_ ``_network_peering``. + """ + + network_peering = proto.Field( + proto.MESSAGE, + number=328926767, + optional=True, + message='NetworkPeering', + ) + + +class NodeGroup(proto.Message): + r"""Represents a sole-tenant Node Group resource. 
A sole-tenant + node is a physical server that is dedicated to hosting VM + instances only for your specific project. Use sole-tenant nodes + to keep your instances physically separated from instances in + other projects, or to group your instances together on the same + host hardware. For more information, read Sole-tenant nodes. + + Attributes: + autoscaling_policy (google.cloud.compute_v1.types.NodeGroupAutoscalingPolicy): + Specifies how autoscaling should behave. + + This field is a member of `oneof`_ ``_autoscaling_policy``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] The type of the resource. Always + compute#nodeGroup for node group. + + This field is a member of `oneof`_ ``_kind``. + location_hint (str): + An opaque location hint used to place the Node close to + other resources. This field is for use by internal tools + that use the public API. The location hint here on the + NodeGroup overrides any location_hint present in the + NodeTemplate. + + This field is a member of `oneof`_ ``_location_hint``. + maintenance_policy (str): + Specifies how to handle instances when a node in the group + undergoes maintenance. Set to one of: DEFAULT, + RESTART_IN_PLACE, or MIGRATE_WITHIN_NODE_GROUP. The default + value is DEFAULT. For more information, see Maintenance + policies. Check the MaintenancePolicy enum for the list of + possible values. + + This field is a member of `oneof`_ ``_maintenance_policy``. 
+ maintenance_window (google.cloud.compute_v1.types.NodeGroupMaintenanceWindow): + + This field is a member of `oneof`_ ``_maintenance_window``. + name (str): + The name of the resource, provided by the client when + initially creating the resource. The resource name must be + 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + node_template (str): + URL of the node template to create the node + group from. + + This field is a member of `oneof`_ ``_node_template``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + size (int): + [Output Only] The total number of nodes in the node group. + + This field is a member of `oneof`_ ``_size``. + status (str): + Check the Status enum for the list of + possible values. + + This field is a member of `oneof`_ ``_status``. + zone (str): + [Output Only] The name of the zone where the node group + resides, such as us-central1-a. + + This field is a member of `oneof`_ ``_zone``. + """ + class MaintenancePolicy(proto.Enum): + r"""Specifies how to handle instances when a node in the group undergoes + maintenance. Set to one of: DEFAULT, RESTART_IN_PLACE, or + MIGRATE_WITHIN_NODE_GROUP. The default value is DEFAULT. For more + information, see Maintenance policies. 
+ """ + UNDEFINED_MAINTENANCE_POLICY = 0 + DEFAULT = 115302945 + MAINTENANCE_POLICY_UNSPECIFIED = 72964182 + MIGRATE_WITHIN_NODE_GROUP = 153483394 + RESTART_IN_PLACE = 228647325 + + class Status(proto.Enum): + r"""""" + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + INVALID = 530283991 + READY = 77848963 + + autoscaling_policy = proto.Field( + proto.MESSAGE, + number=221950041, + optional=True, + message='NodeGroupAutoscalingPolicy', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + location_hint = proto.Field( + proto.STRING, + number=350519505, + optional=True, + ) + maintenance_policy = proto.Field( + proto.STRING, + number=528327646, + optional=True, + ) + maintenance_window = proto.Field( + proto.MESSAGE, + number=186374812, + optional=True, + message='NodeGroupMaintenanceWindow', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + node_template = proto.Field( + proto.STRING, + number=323154455, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + size = proto.Field( + proto.INT32, + number=3530753, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class NodeGroupAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.NodeGroupAggregatedList.ItemsEntry]): + A list of NodeGroupsScopedList resources. + kind (str): + [Output Only] Type of resource.Always + compute#nodeGroupAggregatedList for aggregated lists of node + groups. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='NodeGroupsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeGroupAutoscalingPolicy(proto.Message): + r""" + + Attributes: + max_nodes (int): + The maximum number of nodes that the group + should have. Must be set if autoscaling is + enabled. Maximum value allowed is 100. 
+ + This field is a member of `oneof`_ ``_max_nodes``. + min_nodes (int): + The minimum number of nodes that the group + should have. + + This field is a member of `oneof`_ ``_min_nodes``. + mode (str): + The autoscaling mode. Set to one of: ON, OFF, or + ONLY_SCALE_OUT. For more information, see Autoscaler modes. + Check the Mode enum for the list of possible values. + + This field is a member of `oneof`_ ``_mode``. + """ + class Mode(proto.Enum): + r"""The autoscaling mode. Set to one of: ON, OFF, or ONLY_SCALE_OUT. For + more information, see Autoscaler modes. + """ + UNDEFINED_MODE = 0 + MODE_UNSPECIFIED = 371348091 + OFF = 78159 + ON = 2527 + ONLY_SCALE_OUT = 152713670 + + max_nodes = proto.Field( + proto.INT32, + number=297762838, + optional=True, + ) + min_nodes = proto.Field( + proto.INT32, + number=533370500, + optional=True, + ) + mode = proto.Field( + proto.STRING, + number=3357091, + optional=True, + ) + + +class NodeGroupList(proto.Message): + r"""Contains a list of nodeGroups. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NodeGroup]): + A list of NodeGroup resources. + kind (str): + [Output Only] Type of resource.Always compute#nodeGroupList + for lists of node groups. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='NodeGroup', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeGroupMaintenanceWindow(proto.Message): + r"""Time window specified for daily maintenance operations. GCE's + internal maintenance will be performed within this window. + + Attributes: + maintenance_duration (google.cloud.compute_v1.types.Duration): + [Output only] A predetermined duration for the window, + automatically chosen to be the smallest possible in the + given scenario. + + This field is a member of `oneof`_ ``_maintenance_duration``. + start_time (str): + Start time of the window. This must be in UTC + format that resolves to one of 00:00, 04:00, + 08:00, 12:00, 16:00, or 20:00. For example, both + 13:00-5 and 08:00 are valid. + + This field is a member of `oneof`_ ``_start_time``. + """ + + maintenance_duration = proto.Field( + proto.MESSAGE, + number=525291840, + optional=True, + message='Duration', + ) + start_time = proto.Field( + proto.STRING, + number=37467274, + optional=True, + ) + + +class NodeGroupNode(proto.Message): + r""" + + Attributes: + accelerators (Sequence[google.cloud.compute_v1.types.AcceleratorConfig]): + Accelerators for this node. + cpu_overcommit_type (str): + CPU overcommit. + Check the CpuOvercommitType enum for the list of + possible values. 
+ + This field is a member of `oneof`_ ``_cpu_overcommit_type``. + disks (Sequence[google.cloud.compute_v1.types.LocalDisk]): + Local disk configurations. + instances (Sequence[str]): + Instances scheduled on this node. + name (str): + The name of the node. + + This field is a member of `oneof`_ ``_name``. + node_type (str): + The type of this node. + + This field is a member of `oneof`_ ``_node_type``. + satisfies_pzs (bool): + [Output Only] Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzs``. + server_binding (google.cloud.compute_v1.types.ServerBinding): + Binding properties for the physical server. + + This field is a member of `oneof`_ ``_server_binding``. + server_id (str): + Server ID associated with this node. + + This field is a member of `oneof`_ ``_server_id``. + status (str): + Check the Status enum for the list of + possible values. + + This field is a member of `oneof`_ ``_status``. + """ + class CpuOvercommitType(proto.Enum): + r"""CPU overcommit.""" + UNDEFINED_CPU_OVERCOMMIT_TYPE = 0 + CPU_OVERCOMMIT_TYPE_UNSPECIFIED = 520665615 + ENABLED = 182130465 + NONE = 2402104 + + class Status(proto.Enum): + r"""""" + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + INVALID = 530283991 + READY = 77848963 + REPAIRING = 413483285 + + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=269577064, + message='AcceleratorConfig', + ) + cpu_overcommit_type = proto.Field( + proto.STRING, + number=247727959, + optional=True, + ) + disks = proto.RepeatedField( + proto.MESSAGE, + number=95594102, + message='LocalDisk', + ) + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + node_type = proto.Field( + proto.STRING, + number=465832791, + optional=True, + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + server_binding = proto.Field( + proto.MESSAGE, + number=208179593, + 
optional=True, + message='ServerBinding', + ) + server_id = proto.Field( + proto.STRING, + number=339433367, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class NodeGroupsAddNodesRequest(proto.Message): + r""" + + Attributes: + additional_node_count (int): + Count of additional nodes to be added to the + node group. + + This field is a member of `oneof`_ ``_additional_node_count``. + """ + + additional_node_count = proto.Field( + proto.INT32, + number=134997930, + optional=True, + ) + + +class NodeGroupsDeleteNodesRequest(proto.Message): + r""" + + Attributes: + nodes (Sequence[str]): + Names of the nodes to delete. + """ + + nodes = proto.RepeatedField( + proto.STRING, + number=104993457, + ) + + +class NodeGroupsListNodes(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NodeGroupNode]): + A list of Node resources. + kind (str): + [Output Only] The resource type, which is always + compute.nodeGroupsListNodes for the list of nodes in the + specified node group. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='NodeGroupNode', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeGroupsScopedList(proto.Message): + r""" + + Attributes: + node_groups (Sequence[google.cloud.compute_v1.types.NodeGroup]): + [Output Only] A list of node groups contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] An informational warning that appears when the + nodeGroup list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + node_groups = proto.RepeatedField( + proto.MESSAGE, + number=73188017, + message='NodeGroup', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeGroupsSetNodeTemplateRequest(proto.Message): + r""" + + Attributes: + node_template (str): + Full or partial URL of the node template + resource to be updated for this node group. + + This field is a member of `oneof`_ ``_node_template``. + """ + + node_template = proto.Field( + proto.STRING, + number=323154455, + optional=True, + ) + + +class NodeTemplate(proto.Message): + r"""Represent a sole-tenant Node Template resource. You can use a + template to define properties for nodes in a node group. For + more information, read Creating node groups and instances. + + Attributes: + accelerators (Sequence[google.cloud.compute_v1.types.AcceleratorConfig]): + + cpu_overcommit_type (str): + CPU overcommit. + Check the CpuOvercommitType enum for the list of + possible values. 
+ + This field is a member of `oneof`_ ``_cpu_overcommit_type``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + disks (Sequence[google.cloud.compute_v1.types.LocalDisk]): + + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] The type of the resource. Always + compute#nodeTemplate for node templates. + + This field is a member of `oneof`_ ``_kind``. + name (str): + The name of the resource, provided by the client when + initially creating the resource. The resource name must be + 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + node_affinity_labels (Sequence[google.cloud.compute_v1.types.NodeTemplate.NodeAffinityLabelsEntry]): + Labels to use for node affinity, which will + be used in instance scheduling. + node_type (str): + The node type to use for nodes group that are + created from this template. + + This field is a member of `oneof`_ ``_node_type``. + node_type_flexibility (google.cloud.compute_v1.types.NodeTemplateNodeTypeFlexibility): + The flexible properties of the desired node type. Node + groups that use this node template will create nodes of a + type that matches these properties. 
This field is mutually + exclusive with the node_type property; you can only define + one or the other, but not both. + + This field is a member of `oneof`_ ``_node_type_flexibility``. + region (str): + [Output Only] The name of the region where the node template + resides, such as us-central1. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + server_binding (google.cloud.compute_v1.types.ServerBinding): + Sets the binding properties for the physical server. Valid + values include: - *[Default]* RESTART_NODE_ON_ANY_SERVER: + Restarts VMs on any available physical server - + RESTART_NODE_ON_MINIMAL_SERVER: Restarts VMs on the same + physical server whenever possible See Sole-tenant node + options for more information. + + This field is a member of `oneof`_ ``_server_binding``. + status (str): + [Output Only] The status of the node template. One of the + following values: CREATING, READY, and DELETING. Check the + Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + status_message (str): + [Output Only] An optional, human-readable explanation of the + status. + + This field is a member of `oneof`_ ``_status_message``. + """ + class CpuOvercommitType(proto.Enum): + r"""CPU overcommit.""" + UNDEFINED_CPU_OVERCOMMIT_TYPE = 0 + CPU_OVERCOMMIT_TYPE_UNSPECIFIED = 520665615 + ENABLED = 182130465 + NONE = 2402104 + + class Status(proto.Enum): + r"""[Output Only] The status of the node template. One of the following + values: CREATING, READY, and DELETING. 
+ """ + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + INVALID = 530283991 + READY = 77848963 + + accelerators = proto.RepeatedField( + proto.MESSAGE, + number=269577064, + message='AcceleratorConfig', + ) + cpu_overcommit_type = proto.Field( + proto.STRING, + number=247727959, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disks = proto.RepeatedField( + proto.MESSAGE, + number=95594102, + message='LocalDisk', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + node_affinity_labels = proto.MapField( + proto.STRING, + proto.STRING, + number=339007161, + ) + node_type = proto.Field( + proto.STRING, + number=465832791, + optional=True, + ) + node_type_flexibility = proto.Field( + proto.MESSAGE, + number=315257905, + optional=True, + message='NodeTemplateNodeTypeFlexibility', + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + server_binding = proto.Field( + proto.MESSAGE, + number=208179593, + optional=True, + message='ServerBinding', + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + status_message = proto.Field( + proto.STRING, + number=297428154, + optional=True, + ) + + +class NodeTemplateAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NodeTemplateAggregatedList.ItemsEntry]): + A list of NodeTemplatesScopedList resources. 
+ kind (str): + [Output Only] Type of resource.Always + compute#nodeTemplateAggregatedList for aggregated lists of + node templates. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='NodeTemplatesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeTemplateList(proto.Message): + r"""Contains a list of node templates. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NodeTemplate]): + A list of NodeTemplate resources. 
+ kind (str): + [Output Only] Type of resource.Always + compute#nodeTemplateList for lists of node templates. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='NodeTemplate', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeTemplateNodeTypeFlexibility(proto.Message): + r""" + + Attributes: + cpus (str): + + This field is a member of `oneof`_ ``_cpus``. + local_ssd (str): + + This field is a member of `oneof`_ ``_local_ssd``. + memory (str): + + This field is a member of `oneof`_ ``_memory``. 
+ """ + + cpus = proto.Field( + proto.STRING, + number=3060683, + optional=True, + ) + local_ssd = proto.Field( + proto.STRING, + number=405741360, + optional=True, + ) + memory = proto.Field( + proto.STRING, + number=532856065, + optional=True, + ) + + +class NodeTemplatesScopedList(proto.Message): + r""" + + Attributes: + node_templates (Sequence[google.cloud.compute_v1.types.NodeTemplate]): + [Output Only] A list of node templates contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] An informational warning that appears when the + node templates list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + node_templates = proto.RepeatedField( + proto.MESSAGE, + number=354111804, + message='NodeTemplate', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeType(proto.Message): + r"""Represent a sole-tenant Node Type resource. Each node within + a node group must have a node type. A node type specifies the + total amount of cores and memory for that node. Currently, the + only available node type is n1-node-96-624 node type that has 96 + vCPUs and 624 GB of memory, available in multiple zones. For + more information read Node types. + + Attributes: + cpu_platform (str): + [Output Only] The CPU platform used by this node type. + + This field is a member of `oneof`_ ``_cpu_platform``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + deprecated (google.cloud.compute_v1.types.DeprecationStatus): + [Output Only] The deprecation status associated with this + node type. + + This field is a member of `oneof`_ ``_deprecated``. + description (str): + [Output Only] An optional textual description of the + resource. + + This field is a member of `oneof`_ ``_description``. 
+ guest_cpus (int): + [Output Only] The number of virtual CPUs that are available + to the node type. + + This field is a member of `oneof`_ ``_guest_cpus``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] The type of the resource. Always + compute#nodeType for node types. + + This field is a member of `oneof`_ ``_kind``. + local_ssd_gb (int): + [Output Only] Local SSD available to the node type, defined + in GB. + + This field is a member of `oneof`_ ``_local_ssd_gb``. + memory_mb (int): + [Output Only] The amount of physical memory available to the + node type, defined in MB. + + This field is a member of `oneof`_ ``_memory_mb``. + name (str): + [Output Only] Name of the resource. + + This field is a member of `oneof`_ ``_name``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + zone (str): + [Output Only] The name of the zone where the node type + resides, such as us-central1-a. + + This field is a member of `oneof`_ ``_zone``. 
+ """ + + cpu_platform = proto.Field( + proto.STRING, + number=410285354, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + deprecated = proto.Field( + proto.MESSAGE, + number=515138995, + optional=True, + message='DeprecationStatus', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + guest_cpus = proto.Field( + proto.INT32, + number=393356754, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + local_ssd_gb = proto.Field( + proto.INT32, + number=329237578, + optional=True, + ) + memory_mb = proto.Field( + proto.INT32, + number=116001171, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class NodeTypeAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NodeTypeAggregatedList.ItemsEntry]): + A list of NodeTypesScopedList resources. + kind (str): + [Output Only] Type of resource.Always + compute#nodeTypeAggregatedList for aggregated lists of node + types. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='NodeTypesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeTypeList(proto.Message): + r"""Contains a list of node types. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NodeType]): + A list of NodeType resources. + kind (str): + [Output Only] Type of resource.Always compute#nodeTypeList + for lists of node types. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='NodeType', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NodeTypesScopedList(proto.Message): + r""" + + Attributes: + node_types (Sequence[google.cloud.compute_v1.types.NodeType]): + [Output Only] A list of node types contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] An informational warning that appears when the + node types list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + node_types = proto.RepeatedField( + proto.MESSAGE, + number=482172924, + message='NodeType', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class NotificationEndpoint(proto.Message): + r"""Represents a notification endpoint. A notification endpoint + resource defines an endpoint to receive notifications when there + are status changes detected by the associated health check + service. For more information, see Health checks overview. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. 
+ Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + grpc_settings (google.cloud.compute_v1.types.NotificationEndpointGrpcSettings): + Settings of the gRPC notification endpoint + including the endpoint URL and the retry + duration. + + This field is a member of `oneof`_ ``_grpc_settings``. + id (int): + [Output Only] A unique identifier for this resource type. + The server generates this identifier. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#notificationEndpoint for notification endpoints. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + region (str): + [Output Only] URL of the region where the notification + endpoint resides. This field applies only to the regional + resource. You must specify this field as part of the HTTP + request URL. It is not settable as a field in the request + body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + grpc_settings = proto.Field( + proto.MESSAGE, + number=456139556, + optional=True, + message='NotificationEndpointGrpcSettings', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + + +class NotificationEndpointGrpcSettings(proto.Message): + r"""Represents a gRPC setting that describes one gRPC + notification endpoint and the retry duration attempting to send + notification to this endpoint. + + Attributes: + authority (str): + Optional. If specified, this field is used to + set the authority header by the sender of + notifications. See + https://tools.ietf.org/html/rfc7540#section-8.1.2.3 + + This field is a member of `oneof`_ ``_authority``. + endpoint (str): + Endpoint to which gRPC notifications are + sent. This must be a valid gRPCLB DNS name. + + This field is a member of `oneof`_ ``_endpoint``. + payload_name (str): + Optional. If specified, this field is used to + populate the "name" field in gRPC requests. + + This field is a member of `oneof`_ ``_payload_name``. + resend_interval (google.cloud.compute_v1.types.Duration): + Optional. This field is used to configure how + often to send a full update of all non-healthy + backends. If unspecified, full updates are not + sent. If specified, must be in the range between + 600 seconds to 3600 seconds. Nanos are + disallowed. + + This field is a member of `oneof`_ ``_resend_interval``. 
+ retry_duration_sec (int): + How much time (in seconds) is spent + attempting notification retries until a + successful response is received. Default is 30s. + Limit is 20m (1200s). Must be a positive number. + + This field is a member of `oneof`_ ``_retry_duration_sec``. + """ + + authority = proto.Field( + proto.STRING, + number=401868611, + optional=True, + ) + endpoint = proto.Field( + proto.STRING, + number=130489749, + optional=True, + ) + payload_name = proto.Field( + proto.STRING, + number=300358300, + optional=True, + ) + resend_interval = proto.Field( + proto.MESSAGE, + number=478288969, + optional=True, + message='Duration', + ) + retry_duration_sec = proto.Field( + proto.UINT32, + number=115681117, + optional=True, + ) + + +class NotificationEndpointList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.NotificationEndpoint]): + A list of NotificationEndpoint resources. + kind (str): + [Output Only] Type of the resource. Always + compute#notificationEndpoint for notification endpoints. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='NotificationEndpoint', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class Operation(proto.Message): + r"""Represents an Operation resource. Google Compute Engine has three + Operation resources: \* + `Global `__ \* + `Regional `__ \* + `Zonal `__ You can + use an operation resource to manage asynchronous API requests. For + more information, read Handling API responses. Operations can be + global, regional or zonal. - For global operations, use the + ``globalOperations`` resource. - For regional operations, use the + ``regionOperations`` resource. - For zonal operations, use the + ``zonalOperations`` resource. For more information, read Global, + Regional, and Zonal Resources. + + Attributes: + client_operation_id (str): + [Output Only] The value of ``requestId`` if you provided it + in the request. Not present otherwise. + + This field is a member of `oneof`_ ``_client_operation_id``. + creation_timestamp (str): + [Deprecated] This field is deprecated. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + [Output Only] A textual description of the operation, which + is set when the operation is created. + + This field is a member of `oneof`_ ``_description``. + end_time (str): + [Output Only] The time that this operation was completed. + This value is in RFC3339 text format. + + This field is a member of `oneof`_ ``_end_time``. 
+ error (google.cloud.compute_v1.types.Error): + [Output Only] If errors are generated during processing of + the operation, this field will be populated. + + This field is a member of `oneof`_ ``_error``. + http_error_message (str): + [Output Only] If the operation fails, this field contains + the HTTP error message that was returned, such as + ``NOT FOUND``. + + This field is a member of `oneof`_ ``_http_error_message``. + http_error_status_code (int): + [Output Only] If the operation fails, this field contains + the HTTP error status code that was returned. For example, a + ``404`` means the resource was not found. + + This field is a member of `oneof`_ ``_http_error_status_code``. + id (int): + [Output Only] The unique identifier for the operation. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + insert_time (str): + [Output Only] The time that this operation was requested. + This value is in RFC3339 text format. + + This field is a member of `oneof`_ ``_insert_time``. + kind (str): + [Output Only] Type of the resource. Always + ``compute#operation`` for Operation resources. + + This field is a member of `oneof`_ ``_kind``. + name (str): + [Output Only] Name of the operation. + + This field is a member of `oneof`_ ``_name``. + operation_group_id (str): + [Output Only] An ID that represents a group of operations, + such as when a group of operations results from a + ``bulkInsert`` API request. + + This field is a member of `oneof`_ ``_operation_group_id``. + operation_type (str): + [Output Only] The type of operation, such as ``insert``, + ``update``, or ``delete``, and so on. + + This field is a member of `oneof`_ ``_operation_type``. + progress (int): + [Output Only] An optional progress indicator that ranges + from 0 to 100. There is no requirement that this be linear + or support any granularity of operations. This should not be + used to guess when the operation will be complete. 
This + number should monotonically increase as the operation + progresses. + + This field is a member of `oneof`_ ``_progress``. + region (str): + [Output Only] The URL of the region where the operation + resides. Only applicable when performing regional + operations. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + start_time (str): + [Output Only] The time that this operation was started by + the server. This value is in RFC3339 text format. + + This field is a member of `oneof`_ ``_start_time``. + status (google.cloud.compute_v1.types.Operation.Status): + [Output Only] The status of the operation, which can be one + of the following: ``PENDING``, ``RUNNING``, or ``DONE``. + + This field is a member of `oneof`_ ``_status``. + status_message (str): + [Output Only] An optional textual description of the current + status of the operation. + + This field is a member of `oneof`_ ``_status_message``. + target_id (int): + [Output Only] The unique target ID, which identifies a + specific incarnation of the target resource. + + This field is a member of `oneof`_ ``_target_id``. + target_link (str): + [Output Only] The URL of the resource that the operation + modifies. For operations related to creating a snapshot, + this points to the persistent disk that the snapshot was + created from. + + This field is a member of `oneof`_ ``_target_link``. + user (str): + [Output Only] User who requested the operation, for example: + ``user@example.com``. + + This field is a member of `oneof`_ ``_user``. + warnings (Sequence[google.cloud.compute_v1.types.Warnings]): + [Output Only] If warning messages are generated during + processing of the operation, this field will be populated. + zone (str): + [Output Only] The URL of the zone where the operation + resides. Only applicable when performing per-zone + operations. 
+ + This field is a member of `oneof`_ ``_zone``. + """ + class Status(proto.Enum): + r"""[Output Only] The status of the operation, which can be one of the + following: ``PENDING``, ``RUNNING``, or ``DONE``. + """ + UNDEFINED_STATUS = 0 + DONE = 2104194 + PENDING = 35394935 + RUNNING = 121282975 + + client_operation_id = proto.Field( + proto.STRING, + number=297240295, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + end_time = proto.Field( + proto.STRING, + number=114938801, + optional=True, + ) + error = proto.Field( + proto.MESSAGE, + number=96784904, + optional=True, + message='Error', + ) + http_error_message = proto.Field( + proto.STRING, + number=202521945, + optional=True, + ) + http_error_status_code = proto.Field( + proto.INT32, + number=312345196, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + insert_time = proto.Field( + proto.STRING, + number=433722515, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + operation_group_id = proto.Field( + proto.STRING, + number=40171187, + optional=True, + ) + operation_type = proto.Field( + proto.STRING, + number=177650450, + optional=True, + ) + progress = proto.Field( + proto.INT32, + number=72663597, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + start_time = proto.Field( + proto.STRING, + number=37467274, + optional=True, + ) + status = proto.Field( + proto.ENUM, + number=181260274, + optional=True, + enum=Status, + ) + status_message = proto.Field( + proto.STRING, + number=297428154, + optional=True, + ) + target_id = proto.Field( + proto.UINT64, + 
number=258165385, + optional=True, + ) + target_link = proto.Field( + proto.STRING, + number=62671336, + optional=True, + ) + user = proto.Field( + proto.STRING, + number=3599307, + optional=True, + ) + warnings = proto.RepeatedField( + proto.MESSAGE, + number=498091095, + message='Warnings', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class OperationAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.OperationAggregatedList.ItemsEntry]): + [Output Only] A map of scoped operation lists. + kind (str): + [Output Only] Type of resource. Always + ``compute#operationAggregatedList`` for aggregated lists of + operations. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than ``maxResults``, use the ``nextPageToken`` as a + value for the query parameter ``pageToken`` in the next list + request. Subsequent list requests will have their own + ``nextPageToken`` to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='OperationsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class OperationList(proto.Message): + r"""Contains a list of Operation resources. + + Attributes: + id (str): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Operation]): + [Output Only] A list of Operation resources. + kind (str): + [Output Only] Type of resource. Always + ``compute#operations`` for Operations resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than ``maxResults``, use the ``nextPageToken`` as a + value for the query parameter ``pageToken`` in the next list + request. Subsequent list requests will have their own + ``nextPageToken`` to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Operation', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class OperationsScopedList(proto.Message): + r""" + + Attributes: + operations (Sequence[google.cloud.compute_v1.types.Operation]): + [Output Only] A list of operations contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of operations when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + operations = proto.RepeatedField( + proto.MESSAGE, + number=4184044, + message='Operation', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class OutlierDetection(proto.Message): + r"""Settings controlling the eviction of unhealthy hosts from the + load balancing pool for the backend service. + + Attributes: + base_ejection_time (google.cloud.compute_v1.types.Duration): + The base time that a host is ejected for. The + real ejection time is equal to the base ejection + time multiplied by the number of times the host + has been ejected. Defaults to 30000ms or 30s. + + This field is a member of `oneof`_ ``_base_ejection_time``. + consecutive_errors (int): + Number of errors before a host is ejected + from the connection pool. When the backend host + is accessed over HTTP, a 5xx return code + qualifies as an error. Defaults to 5. + + This field is a member of `oneof`_ ``_consecutive_errors``. 
+ consecutive_gateway_failure (int): + The number of consecutive gateway failures + (502, 503, 504 status or connection errors that + are mapped to one of those status codes) before + a consecutive gateway failure ejection occurs. + Defaults to 3. + + This field is a member of `oneof`_ ``_consecutive_gateway_failure``. + enforcing_consecutive_errors (int): + The percentage chance that a host will be + actually ejected when an outlier status is + detected through consecutive 5xx. This setting + can be used to disable ejection or to ramp it up + slowly. Defaults to 0. + + This field is a member of `oneof`_ ``_enforcing_consecutive_errors``. + enforcing_consecutive_gateway_failure (int): + The percentage chance that a host will be + actually ejected when an outlier status is + detected through consecutive gateway failures. + This setting can be used to disable ejection or + to ramp it up slowly. Defaults to 100. + + This field is a member of `oneof`_ ``_enforcing_consecutive_gateway_failure``. + enforcing_success_rate (int): + The percentage chance that a host will be + actually ejected when an outlier status is + detected through success rate statistics. This + setting can be used to disable ejection or to + ramp it up slowly. Defaults to 100. + + This field is a member of `oneof`_ ``_enforcing_success_rate``. + interval (google.cloud.compute_v1.types.Duration): + Time interval between ejection analysis + sweeps. This can result in both new ejections as + well as hosts being returned to service. + Defaults to 1 second. + + This field is a member of `oneof`_ ``_interval``. + max_ejection_percent (int): + Maximum percentage of hosts in the load + balancing pool for the backend service that can + be ejected. Defaults to 50%. + + This field is a member of `oneof`_ ``_max_ejection_percent``. + success_rate_minimum_hosts (int): + The number of hosts in a cluster that must + have enough request volume to detect success + rate outliers. 
If the number of hosts is less + than this setting, outlier detection via success + rate statistics is not performed for any host in + the cluster. Defaults to 5. + + This field is a member of `oneof`_ ``_success_rate_minimum_hosts``. + success_rate_request_volume (int): + The minimum number of total requests that + must be collected in one interval (as defined by + the interval duration above) to include this + host in success rate based outlier detection. If + the volume is lower than this setting, outlier + detection via success rate statistics is not + performed for that host. Defaults to 100. + + This field is a member of `oneof`_ ``_success_rate_request_volume``. + success_rate_stdev_factor (int): + This factor is used to determine the ejection threshold for + success rate outlier ejection. The ejection threshold is the + difference between the mean success rate, and the product of + this factor and the standard deviation of the mean success + rate: mean - (stdev \* success_rate_stdev_factor). This + factor is divided by a thousand to get a double. That is, if + the desired factor is 1.9, the runtime value should be 1900. + Defaults to 1900. + + This field is a member of `oneof`_ ``_success_rate_stdev_factor``. 
+ """ + + base_ejection_time = proto.Field( + proto.MESSAGE, + number=80997255, + optional=True, + message='Duration', + ) + consecutive_errors = proto.Field( + proto.INT32, + number=387193248, + optional=True, + ) + consecutive_gateway_failure = proto.Field( + proto.INT32, + number=417504250, + optional=True, + ) + enforcing_consecutive_errors = proto.Field( + proto.INT32, + number=213133760, + optional=True, + ) + enforcing_consecutive_gateway_failure = proto.Field( + proto.INT32, + number=394440666, + optional=True, + ) + enforcing_success_rate = proto.Field( + proto.INT32, + number=194508732, + optional=True, + ) + interval = proto.Field( + proto.MESSAGE, + number=33547461, + optional=True, + message='Duration', + ) + max_ejection_percent = proto.Field( + proto.INT32, + number=18436888, + optional=True, + ) + success_rate_minimum_hosts = proto.Field( + proto.INT32, + number=525766903, + optional=True, + ) + success_rate_request_volume = proto.Field( + proto.INT32, + number=281425357, + optional=True, + ) + success_rate_stdev_factor = proto.Field( + proto.INT32, + number=174735773, + optional=True, + ) + + +class PacketMirroring(proto.Message): + r"""Represents a Packet Mirroring resource. Packet Mirroring + clones the traffic of specified instances in your Virtual + Private Cloud (VPC) network and forwards it to a collector + destination, such as an instance group of an internal TCP/UDP + load balancer, for analysis or examination. For more information + about setting up Packet Mirroring, see Using Packet Mirroring. + + Attributes: + collector_ilb (google.cloud.compute_v1.types.PacketMirroringForwardingRuleInfo): + The Forwarding Rule resource of type + loadBalancingScheme=INTERNAL that will be used + as collector for mirrored traffic. The specified + forwarding rule must have isMirroringCollector + set to true. + + This field is a member of `oneof`_ ``_collector_ilb``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. 
+ + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + enable (str): + Indicates whether or not this packet + mirroring takes effect. If set to FALSE, this + packet mirroring policy will not be enforced on + the network. The default is TRUE. Check the + Enable enum for the list of possible values. + + This field is a member of `oneof`_ ``_enable``. + filter (google.cloud.compute_v1.types.PacketMirroringFilter): + Filter for mirrored traffic. If unspecified, + all traffic is mirrored. + + This field is a member of `oneof`_ ``_filter``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#packetMirroring for packet mirrorings. + + This field is a member of `oneof`_ ``_kind``. + mirrored_resources (google.cloud.compute_v1.types.PacketMirroringMirroredResourceInfo): + PacketMirroring mirroredResourceInfos. + MirroredResourceInfo specifies a set of mirrored + VM instances, subnetworks and/or tags for which + traffic from/to all VM instances will be + mirrored. + + This field is a member of `oneof`_ ``_mirrored_resources``. + name (str): + Name of the resource; provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. 
+ network (google.cloud.compute_v1.types.PacketMirroringNetworkInfo): + Specifies the mirrored VPC network. Only + packets in this network will be mirrored. All + mirrored VMs should have a NIC in the given + network. All mirrored subnetworks should belong + to the given network. + + This field is a member of `oneof`_ ``_network``. + priority (int): + The priority of applying this configuration. + Priority is used to break ties in cases where + there is more than one matching rule. In the + case of two rules that apply for a given + Instance, the one with the lowest-numbered + priority value wins. Default value is 1000. + Valid range is 0 through 65535. + + This field is a member of `oneof`_ ``_priority``. + region (str): + [Output Only] URI of the region where the packetMirroring + resides. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + """ + class Enable(proto.Enum): + r"""Indicates whether or not this packet mirroring takes effect. + If set to FALSE, this packet mirroring policy will not be + enforced on the network. The default is TRUE. 
+ """ + UNDEFINED_ENABLE = 0 + FALSE = 66658563 + TRUE = 2583950 + + collector_ilb = proto.Field( + proto.MESSAGE, + number=426607853, + optional=True, + message='PacketMirroringForwardingRuleInfo', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + enable = proto.Field( + proto.STRING, + number=311764355, + optional=True, + ) + filter = proto.Field( + proto.MESSAGE, + number=336120696, + optional=True, + message='PacketMirroringFilter', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + mirrored_resources = proto.Field( + proto.MESSAGE, + number=124817348, + optional=True, + message='PacketMirroringMirroredResourceInfo', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.MESSAGE, + number=232872494, + optional=True, + message='PacketMirroringNetworkInfo', + ) + priority = proto.Field( + proto.UINT32, + number=445151652, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + + +class PacketMirroringAggregatedList(proto.Message): + r"""Contains a list of packetMirrorings. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.PacketMirroringAggregatedList.ItemsEntry]): + A list of PacketMirroring resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. 
If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='PacketMirroringsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class PacketMirroringFilter(proto.Message): + r""" + + Attributes: + I_p_protocols (Sequence[str]): + Protocols that apply as filter on mirrored + traffic. If no protocols are specified, all + traffic that matches the specified CIDR ranges + is mirrored. If neither cidrRanges nor + IPProtocols is specified, all traffic is + mirrored. + cidr_ranges (Sequence[str]): + IP CIDR ranges that apply as filter on the + source (ingress) or destination (egress) IP in + the IP header. Only IPv4 is supported. If no + ranges are specified, all traffic that matches + the specified IPProtocols is mirrored. 
If + neither cidrRanges nor IPProtocols is specified, + all traffic is mirrored. + direction (str): + Direction of traffic to mirror, either + INGRESS, EGRESS, or BOTH. The default is BOTH. + Check the Direction enum for the list of + possible values. + + This field is a member of `oneof`_ ``_direction``. + """ + class Direction(proto.Enum): + r"""Direction of traffic to mirror, either INGRESS, EGRESS, or + BOTH. The default is BOTH. + """ + UNDEFINED_DIRECTION = 0 + BOTH = 2044801 + EGRESS = 432880501 + INGRESS = 516931221 + + I_p_protocols = proto.RepeatedField( + proto.STRING, + number=98544854, + ) + cidr_ranges = proto.RepeatedField( + proto.STRING, + number=487901697, + ) + direction = proto.Field( + proto.STRING, + number=111150975, + optional=True, + ) + + +class PacketMirroringForwardingRuleInfo(proto.Message): + r""" + + Attributes: + canonical_url (str): + [Output Only] Unique identifier for the forwarding rule; + defined by the server. + + This field is a member of `oneof`_ ``_canonical_url``. + url (str): + Resource URL to the forwarding rule + representing the ILB configured as destination + of the mirrored traffic. + + This field is a member of `oneof`_ ``_url``. + """ + + canonical_url = proto.Field( + proto.STRING, + number=512294820, + optional=True, + ) + url = proto.Field( + proto.STRING, + number=116079, + optional=True, + ) + + +class PacketMirroringList(proto.Message): + r"""Contains a list of PacketMirroring resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.PacketMirroring]): + A list of PacketMirroring resources. + kind (str): + [Output Only] Type of resource. Always + compute#packetMirroring for packetMirrorings. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. 
If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='PacketMirroring', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class PacketMirroringMirroredResourceInfo(proto.Message): + r""" + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.PacketMirroringMirroredResourceInfoInstanceInfo]): + A set of virtual machine instances that are + being mirrored. They must live in zones + contained in the same region as this + packetMirroring. Note that this config will + apply only to those network interfaces of the + Instances that belong to the network specified + in this packetMirroring. You may specify a + maximum of 50 Instances. + subnetworks (Sequence[google.cloud.compute_v1.types.PacketMirroringMirroredResourceInfoSubnetInfo]): + A set of subnetworks for which traffic + from/to all VM instances will be mirrored. They + must live in the same region as this + packetMirroring. 
You may specify a maximum of 5 + subnetworks. + tags (Sequence[str]): + A set of mirrored tags. Traffic from/to all + VM instances that have one or more of these tags + will be mirrored. + """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='PacketMirroringMirroredResourceInfoInstanceInfo', + ) + subnetworks = proto.RepeatedField( + proto.MESSAGE, + number=415853125, + message='PacketMirroringMirroredResourceInfoSubnetInfo', + ) + tags = proto.RepeatedField( + proto.STRING, + number=3552281, + ) + + +class PacketMirroringMirroredResourceInfoInstanceInfo(proto.Message): + r""" + + Attributes: + canonical_url (str): + [Output Only] Unique identifier for the instance; defined by + the server. + + This field is a member of `oneof`_ ``_canonical_url``. + url (str): + Resource URL to the virtual machine instance + which is being mirrored. + + This field is a member of `oneof`_ ``_url``. + """ + + canonical_url = proto.Field( + proto.STRING, + number=512294820, + optional=True, + ) + url = proto.Field( + proto.STRING, + number=116079, + optional=True, + ) + + +class PacketMirroringMirroredResourceInfoSubnetInfo(proto.Message): + r""" + + Attributes: + canonical_url (str): + [Output Only] Unique identifier for the subnetwork; defined + by the server. + + This field is a member of `oneof`_ ``_canonical_url``. + url (str): + Resource URL to the subnetwork for which + traffic from/to all VM instances will be + mirrored. + + This field is a member of `oneof`_ ``_url``. + """ + + canonical_url = proto.Field( + proto.STRING, + number=512294820, + optional=True, + ) + url = proto.Field( + proto.STRING, + number=116079, + optional=True, + ) + + +class PacketMirroringNetworkInfo(proto.Message): + r""" + + Attributes: + canonical_url (str): + [Output Only] Unique identifier for the network; defined by + the server. + + This field is a member of `oneof`_ ``_canonical_url``. + url (str): + URL of the network resource. 
+ + This field is a member of `oneof`_ ``_url``. + """ + + canonical_url = proto.Field( + proto.STRING, + number=512294820, + optional=True, + ) + url = proto.Field( + proto.STRING, + number=116079, + optional=True, + ) + + +class PacketMirroringsScopedList(proto.Message): + r""" + + Attributes: + packet_mirrorings (Sequence[google.cloud.compute_v1.types.PacketMirroring]): + A list of packetMirrorings contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of packetMirrorings when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + packet_mirrorings = proto.RepeatedField( + proto.MESSAGE, + number=154615079, + message='PacketMirroring', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class PatchAutoscalerRequest(proto.Message): + r"""A request message for Autoscalers.Patch. See the method + description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to patch. + + This field is a member of `oneof`_ ``_autoscaler``. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + zone (str): + Name of the zone for this request. + """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + optional=True, + ) + autoscaler_resource = proto.Field( + proto.MESSAGE, + number=207616118, + message='Autoscaler', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class PatchBackendBucketRequest(proto.Message): + r"""A request message for BackendBuckets.Patch. See the method + description for details. + + Attributes: + backend_bucket (str): + Name of the BackendBucket resource to patch. + backend_bucket_resource (google.cloud.compute_v1.types.BackendBucket): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + backend_bucket = proto.Field( + proto.STRING, + number=91714037, + ) + backend_bucket_resource = proto.Field( + proto.MESSAGE, + number=380757784, + message='BackendBucket', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.Patch. See the method + description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to patch. + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + backend_service_resource = proto.Field( + proto.MESSAGE, + number=347586723, + message='BackendService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.Patch. See the method + description for details. 
+ + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + firewall_policy_resource (google.cloud.compute_v1.types.FirewallPolicy): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + firewall_policy_resource = proto.Field( + proto.MESSAGE, + number=495049532, + message='FirewallPolicy', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchFirewallRequest(proto.Message): + r"""A request message for Firewalls.Patch. See the method + description for details. + + Attributes: + firewall (str): + Name of the firewall rule to patch. + firewall_resource (google.cloud.compute_v1.types.Firewall): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall = proto.Field( + proto.STRING, + number=511016192, + ) + firewall_resource = proto.Field( + proto.MESSAGE, + number=41425005, + message='Firewall', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchForwardingRuleRequest(proto.Message): + r"""A request message for ForwardingRules.Patch. See the method + description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource to patch. + forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + forwarding_rule_resource = proto.Field( + proto.MESSAGE, + number=301211695, + message='ForwardingRule', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchGlobalForwardingRuleRequest(proto.Message): + r"""A request message for GlobalForwardingRules.Patch. See the + method description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource to patch. + forwarding_rule_resource (google.cloud.compute_v1.types.ForwardingRule): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + forwarding_rule_resource = proto.Field( + proto.MESSAGE, + number=301211695, + message='ForwardingRule', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchGlobalPublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for GlobalPublicDelegatedPrefixes.Patch. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix resource to + patch. + public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix = proto.Field( + proto.STRING, + number=204238440, + ) + public_delegated_prefix_resource = proto.Field( + proto.MESSAGE, + number=47594501, + message='PublicDelegatedPrefix', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchHealthCheckRequest(proto.Message): + r"""A request message for HealthChecks.Patch. See the method + description for details. + + Attributes: + health_check (str): + Name of the HealthCheck resource to patch. + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + health_check_resource = proto.Field( + proto.MESSAGE, + number=201925032, + message='HealthCheck', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchImageRequest(proto.Message): + r"""A request message for Images.Patch. See the method + description for details. 
+ + Attributes: + image (str): + Name of the image resource to patch. + image_resource (google.cloud.compute_v1.types.Image): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + image = proto.Field( + proto.STRING, + number=100313435, + ) + image_resource = proto.Field( + proto.MESSAGE, + number=371171954, + message='Image', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.Patch. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the instance group manager. + instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where you want to create + the managed instance group. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_manager_resource = proto.Field( + proto.MESSAGE, + number=261063946, + message='InstanceGroupManager', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class PatchInterconnectAttachmentRequest(proto.Message): + r"""A request message for InterconnectAttachments.Patch. See the + method description for details. + + Attributes: + interconnect_attachment (str): + Name of the interconnect attachment to patch. + interconnect_attachment_resource (google.cloud.compute_v1.types.InterconnectAttachment): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + interconnect_attachment = proto.Field( + proto.STRING, + number=308135284, + ) + interconnect_attachment_resource = proto.Field( + proto.MESSAGE, + number=212341369, + message='InterconnectAttachment', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchInterconnectRequest(proto.Message): + r"""A request message for Interconnects.Patch. See the method + description for details. + + Attributes: + interconnect (str): + Name of the interconnect to update. + interconnect_resource (google.cloud.compute_v1.types.Interconnect): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + """ + + interconnect = proto.Field( + proto.STRING, + number=224601230, + ) + interconnect_resource = proto.Field( + proto.MESSAGE, + number=397611167, + message='Interconnect', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchNetworkRequest(proto.Message): + r"""A request message for Networks.Patch. See the method + description for details. + + Attributes: + network (str): + Name of the network to update. + network_resource (google.cloud.compute_v1.types.Network): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + network = proto.Field( + proto.STRING, + number=232872494, + ) + network_resource = proto.Field( + proto.MESSAGE, + number=122105599, + message='Network', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.Patch. See the method + description for details. 
+ + Attributes: + node_group (str): + Name of the NodeGroup resource to update. + node_group_resource (google.cloud.compute_v1.types.NodeGroup): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + node_group = proto.Field( + proto.STRING, + number=469958146, + ) + node_group_resource = proto.Field( + proto.MESSAGE, + number=505321899, + message='NodeGroup', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class PatchPacketMirroringRequest(proto.Message): + r"""A request message for PacketMirrorings.Patch. See the method + description for details. + + Attributes: + packet_mirroring (str): + Name of the PacketMirroring resource to + patch. + packet_mirroring_resource (google.cloud.compute_v1.types.PacketMirroring): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + packet_mirroring = proto.Field( + proto.STRING, + number=22305996, + ) + packet_mirroring_resource = proto.Field( + proto.MESSAGE, + number=493501985, + message='PacketMirroring', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchPerInstanceConfigsInstanceGroupManagerRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.PatchPerInstanceConfigs. See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + instance_group_managers_patch_per_instance_configs_req_resource (google.cloud.compute_v1.types.InstanceGroupManagersPatchPerInstanceConfigsReq): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the managed + instance group is located. It should conform to + RFC1035. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_patch_per_instance_configs_req_resource = proto.Field( + proto.MESSAGE, + number=356650495, + message='InstanceGroupManagersPatchPerInstanceConfigsReq', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class PatchPerInstanceConfigsRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.PatchPerInstanceConfigs. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request, + should conform to RFC1035. + region_instance_group_manager_patch_instance_config_req_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagerPatchInstanceConfigReq): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_manager_patch_instance_config_req_resource = proto.Field( + proto.MESSAGE, + number=197682890, + message='RegionInstanceGroupManagerPatchInstanceConfigReq', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchPublicAdvertisedPrefixeRequest(proto.Message): + r"""A request message for PublicAdvertisedPrefixes.Patch. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + public_advertised_prefix (str): + Name of the PublicAdvertisedPrefix resource + to patch. + public_advertised_prefix_resource (google.cloud.compute_v1.types.PublicAdvertisedPrefix): + The body resource for this request + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. 
This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_advertised_prefix = proto.Field( + proto.STRING, + number=101874590, + ) + public_advertised_prefix_resource = proto.Field( + proto.MESSAGE, + number=233614223, + message='PublicAdvertisedPrefix', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchPublicDelegatedPrefixeRequest(proto.Message): + r"""A request message for PublicDelegatedPrefixes.Patch. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + public_delegated_prefix (str): + Name of the PublicDelegatedPrefix resource to + patch. + public_delegated_prefix_resource (google.cloud.compute_v1.types.PublicDelegatedPrefix): + The body resource for this request + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + public_delegated_prefix = proto.Field( + proto.STRING, + number=204238440, + ) + public_delegated_prefix_resource = proto.Field( + proto.MESSAGE, + number=47594501, + message='PublicDelegatedPrefix', + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchRegionAutoscalerRequest(proto.Message): + r"""A request message for RegionAutoscalers.Patch. See the method + description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to patch. + + This field is a member of `oneof`_ ``_autoscaler``. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + optional=True, + ) + autoscaler_resource = proto.Field( + proto.MESSAGE, + number=207616118, + message='Autoscaler', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchRegionBackendServiceRequest(proto.Message): + r"""A request message for RegionBackendServices.Patch. See the + method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to patch. + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + backend_service_resource = proto.Field( + proto.MESSAGE, + number=347586723, + message='BackendService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchRegionHealthCheckRequest(proto.Message): + r"""A request message for RegionHealthChecks.Patch. See the + method description for details. + + Attributes: + health_check (str): + Name of the HealthCheck resource to patch. + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + health_check_resource = proto.Field( + proto.MESSAGE, + number=201925032, + message='HealthCheck', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchRegionHealthCheckServiceRequest(proto.Message): + r"""A request message for RegionHealthCheckServices.Patch. See + the method description for details. + + Attributes: + health_check_service (str): + Name of the HealthCheckService to update. The + name must be 1-63 characters long, and comply + with RFC1035. + health_check_service_resource (google.cloud.compute_v1.types.HealthCheckService): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + health_check_service = proto.Field( + proto.STRING, + number=408374747, + ) + health_check_service_resource = proto.Field( + proto.MESSAGE, + number=477367794, + message='HealthCheckService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for RegionInstanceGroupManagers.Patch. See + the method description for details. + + Attributes: + instance_group_manager (str): + The name of the instance group manager. + instance_group_manager_resource (google.cloud.compute_v1.types.InstanceGroupManager): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_manager_resource = proto.Field( + proto.MESSAGE, + number=261063946, + message='InstanceGroupManager', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchRegionUrlMapRequest(proto.Message): + r"""A request message for RegionUrlMaps.Patch. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + begin_interface: MixerMutationRequestBuilder Request ID to + support idempotency. + + This field is a member of `oneof`_ ``_request_id``. + url_map (str): + Name of the UrlMap resource to patch. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + url_map_resource = proto.Field( + proto.MESSAGE, + number=168675425, + message='UrlMap', + ) + + +class PatchRouterRequest(proto.Message): + r"""A request message for Routers.Patch. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + router (str): + Name of the Router resource to patch. + router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + router = proto.Field( + proto.STRING, + number=148608841, + ) + router_resource = proto.Field( + proto.MESSAGE, + number=155222084, + message='Router', + ) + + +class PatchRuleFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.PatchRule. See the + method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + firewall_policy_rule_resource (google.cloud.compute_v1.types.FirewallPolicyRule): + The body resource for this request + priority (int): + The priority of the rule to patch. + + This field is a member of `oneof`_ ``_priority``. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + firewall_policy_rule_resource = proto.Field( + proto.MESSAGE, + number=250523523, + message='FirewallPolicyRule', + ) + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class PatchRuleSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.PatchRule. See the + method description for details. + + Attributes: + priority (int): + The priority of the rule to patch. + + This field is a member of `oneof`_ ``_priority``. + project (str): + Project ID for this request. + security_policy (str): + Name of the security policy to update. + security_policy_rule_resource (google.cloud.compute_v1.types.SecurityPolicyRule): + The body resource for this request + """ + + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + security_policy = proto.Field( + proto.STRING, + number=171082513, + ) + security_policy_rule_resource = proto.Field( + proto.MESSAGE, + number=402693443, + message='SecurityPolicyRule', + ) + + +class PatchSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.Patch. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + security_policy (str): + Name of the security policy to update. + security_policy_resource (google.cloud.compute_v1.types.SecurityPolicy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + security_policy = proto.Field( + proto.STRING, + number=171082513, + ) + security_policy_resource = proto.Field( + proto.MESSAGE, + number=216159612, + message='SecurityPolicy', + ) + + +class PatchServiceAttachmentRequest(proto.Message): + r"""A request message for ServiceAttachments.Patch. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The region scoping this request and should + conform to RFC1035. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. 
The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + service_attachment (str): + The resource id of the ServiceAttachment to + patch. It should conform to RFC1035 resource + name or be a string form on an unsigned long + number. + service_attachment_resource (google.cloud.compute_v1.types.ServiceAttachment): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + service_attachment = proto.Field( + proto.STRING, + number=338957549, + ) + service_attachment_resource = proto.Field( + proto.MESSAGE, + number=472980256, + message='ServiceAttachment', + ) + + +class PatchSslPolicyRequest(proto.Message): + r"""A request message for SslPolicies.Patch. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + ssl_policy (str): + Name of the SSL policy to update. 
The name + must be 1-63 characters long, and comply with + RFC1035. + ssl_policy_resource (google.cloud.compute_v1.types.SslPolicy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + ssl_policy = proto.Field( + proto.STRING, + number=295190213, + ) + ssl_policy_resource = proto.Field( + proto.MESSAGE, + number=274891848, + message='SslPolicy', + ) + + +class PatchSubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.Patch. See the method + description for details. + + Attributes: + drain_timeout_seconds (int): + The drain timeout specifies the upper bound in seconds on + the amount of time allowed to drain connections from the + current ACTIVE subnetwork to the current BACKUP subnetwork. + The drain timeout is only applicable when the following + conditions are true: - the subnetwork being patched has + purpose = INTERNAL_HTTPS_LOAD_BALANCER - the subnetwork + being patched has role = BACKUP - the patch request is + setting the role to ACTIVE. Note that after this patch + operation the roles of the ACTIVE and BACKUP subnetworks + will be swapped. + + This field is a member of `oneof`_ ``_drain_timeout_seconds``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + subnetwork (str): + Name of the Subnetwork resource to patch. + subnetwork_resource (google.cloud.compute_v1.types.Subnetwork): + The body resource for this request + """ + + drain_timeout_seconds = proto.Field( + proto.INT32, + number=357707098, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + ) + subnetwork_resource = proto.Field( + proto.MESSAGE, + number=42233151, + message='Subnetwork', + ) + + +class PatchTargetGrpcProxyRequest(proto.Message): + r"""A request message for TargetGrpcProxies.Patch. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + target_grpc_proxy (str): + Name of the TargetGrpcProxy resource to + patch. 
+ target_grpc_proxy_resource (google.cloud.compute_v1.types.TargetGrpcProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_grpc_proxy = proto.Field( + proto.STRING, + number=5020283, + ) + target_grpc_proxy_resource = proto.Field( + proto.MESSAGE, + number=328922450, + message='TargetGrpcProxy', + ) + + +class PatchTargetHttpProxyRequest(proto.Message): + r"""A request message for TargetHttpProxies.Patch. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + target_http_proxy (str): + Name of the TargetHttpProxy resource to + patch. 
+ target_http_proxy_resource (google.cloud.compute_v1.types.TargetHttpProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_http_proxy = proto.Field( + proto.STRING, + number=206872421, + ) + target_http_proxy_resource = proto.Field( + proto.MESSAGE, + number=24696744, + message='TargetHttpProxy', + ) + + +class PatchTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.Patch. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxy (str): + Name of the TargetHttpsProxy resource to + patch. 
+ target_https_proxy_resource (google.cloud.compute_v1.types.TargetHttpsProxy): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + target_https_proxy_resource = proto.Field( + proto.MESSAGE, + number=433657473, + message='TargetHttpsProxy', + ) + + +class PatchUrlMapRequest(proto.Message): + r"""A request message for UrlMaps.Patch. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + url_map (str): + Name of the UrlMap resource to patch. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + url_map_resource = proto.Field( + proto.MESSAGE, + number=168675425, + message='UrlMap', + ) + + +class PathMatcher(proto.Message): + r"""A matcher for the path portion of the URL. 
The BackendService + from the longest-matched rule will serve the URL. If no rule was + matched, the default service will be used. + + Attributes: + default_route_action (google.cloud.compute_v1.types.HttpRouteAction): + defaultRouteAction takes effect when none of + the pathRules or routeRules match. The load + balancer performs advanced routing actions like + URL rewrites, header transformations, etc. prior + to forwarding the request to the selected + backend. If defaultRouteAction specifies any + weightedBackendServices, defaultService must not + be set. Conversely if defaultService is set, + defaultRouteAction cannot contain any + weightedBackendServices. Only one of + defaultRouteAction or defaultUrlRedirect must be + set. UrlMaps for external HTTP(S) load balancers + support only the urlRewrite action within a + pathMatcher's defaultRouteAction. + + This field is a member of `oneof`_ ``_default_route_action``. + default_service (str): + The full or partial URL to the BackendService resource. This + will be used if none of the pathRules or routeRules defined + by this PathMatcher are matched. For example, the following + are all valid URLs to a BackendService resource: - + https://www.googleapis.com/compute/v1/projects/project + /global/backendServices/backendService - + compute/v1/projects/project/global/backendServices/backendService + - global/backendServices/backendService If + defaultRouteAction is additionally specified, advanced + routing actions like URL Rewrites, etc. take effect prior to + sending the request to the backend. However, if + defaultService is specified, defaultRouteAction cannot + contain any weightedBackendServices. Conversely, if + defaultRouteAction specifies any weightedBackendServices, + defaultService must not be specified. Only one of + defaultService, defaultUrlRedirect or + defaultRouteAction.weightedBackendService must be set. 
+ Authorization requires one or more of the following Google + IAM permissions on the specified resource default_service: - + compute.backendBuckets.use - compute.backendServices.use + + This field is a member of `oneof`_ ``_default_service``. + default_url_redirect (google.cloud.compute_v1.types.HttpRedirectAction): + When none of the specified pathRules or + routeRules match, the request is redirected to a + URL specified by defaultUrlRedirect. If + defaultUrlRedirect is specified, defaultService + or defaultRouteAction must not be set. Not + supported when the URL map is bound to target + gRPC proxy. + + This field is a member of `oneof`_ ``_default_url_redirect``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + header_action (google.cloud.compute_v1.types.HttpHeaderAction): + Specifies changes to request and response + headers that need to take effect for the + selected backendService. HeaderAction specified + here are applied after the matching + HttpRouteRule HeaderAction and before the + HeaderAction in the UrlMap Note that + headerAction is not supported for Loadbalancers + that have their loadBalancingScheme set to + EXTERNAL. Not supported when the URL map is + bound to target gRPC proxy that has + validateForProxyless field set to true. + + This field is a member of `oneof`_ ``_header_action``. + name (str): + The name to which this PathMatcher is + referred by the HostRule. + + This field is a member of `oneof`_ ``_name``. + path_rules (Sequence[google.cloud.compute_v1.types.PathRule]): + The list of path rules. Use this list instead of routeRules + when routing based on simple path matching is all that's + required. The order by which path rules are specified does + not matter. Matches are always done on the + longest-path-first basis. 
For example: a pathRule with a + path /a/b/c/\* will match before /a/b/\* irrespective of the + order in which those paths appear in this list. Within a + given pathMatcher, only one of pathRules or routeRules must + be set. + route_rules (Sequence[google.cloud.compute_v1.types.HttpRouteRule]): + The list of HTTP route rules. Use this list + instead of pathRules when advanced route + matching and routing actions are desired. + routeRules are evaluated in order of priority, + from the lowest to highest number. Within a + given pathMatcher, you can set only one of + pathRules or routeRules. + """ + + default_route_action = proto.Field( + proto.MESSAGE, + number=378919466, + optional=True, + message='HttpRouteAction', + ) + default_service = proto.Field( + proto.STRING, + number=370242231, + optional=True, + ) + default_url_redirect = proto.Field( + proto.MESSAGE, + number=359503338, + optional=True, + message='HttpRedirectAction', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + header_action = proto.Field( + proto.MESSAGE, + number=328077352, + optional=True, + message='HttpHeaderAction', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + path_rules = proto.RepeatedField( + proto.MESSAGE, + number=104439901, + message='PathRule', + ) + route_rules = proto.RepeatedField( + proto.MESSAGE, + number=376292225, + message='HttpRouteRule', + ) + + +class PathRule(proto.Message): + r"""A path-matching rule for a URL. If matched, will use the + specified BackendService to handle the traffic arriving at this + URL. + + Attributes: + paths (Sequence[str]): + The list of path patterns to match. Each must start with / + and the only place a \* is allowed is at the end following a + /. The string fed to the path matcher does not include any + text after the first ? or #, and those chars are not allowed + here. 
+ route_action (google.cloud.compute_v1.types.HttpRouteAction): + In response to a matching path, the load + balancer performs advanced routing actions like + URL rewrites, header transformations, etc. prior + to forwarding the request to the selected + backend. If routeAction specifies any + weightedBackendServices, service must not be + set. Conversely if service is set, routeAction + cannot contain any weightedBackendServices. Only + one of routeAction or urlRedirect must be set. + UrlMaps for external HTTP(S) load balancers + support only the urlRewrite action within a + pathRule's routeAction. + + This field is a member of `oneof`_ ``_route_action``. + service (str): + The full or partial URL of the backend + service resource to which traffic is directed if + this rule is matched. If routeAction is + additionally specified, advanced routing actions + like URL Rewrites, etc. take effect prior to + sending the request to the backend. However, if + service is specified, routeAction cannot contain + any weightedBackendService s. Conversely, if + routeAction specifies any + weightedBackendServices, service must not be + specified. Only one of urlRedirect, service or + routeAction.weightedBackendService must be set. + + This field is a member of `oneof`_ ``_service``. + url_redirect (google.cloud.compute_v1.types.HttpRedirectAction): + When a path pattern is matched, the request + is redirected to a URL specified by urlRedirect. + If urlRedirect is specified, service or + routeAction must not be set. Not supported when + the URL map is bound to target gRPC proxy. + + This field is a member of `oneof`_ ``_url_redirect``. 
+ """ + + paths = proto.RepeatedField( + proto.STRING, + number=106438894, + ) + route_action = proto.Field( + proto.MESSAGE, + number=424563948, + optional=True, + message='HttpRouteAction', + ) + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + url_redirect = proto.Field( + proto.MESSAGE, + number=405147820, + optional=True, + message='HttpRedirectAction', + ) + + +class PerInstanceConfig(proto.Message): + r""" + + Attributes: + fingerprint (str): + Fingerprint of this per-instance config. This + field can be used in optimistic locking. It is + ignored when inserting a per-instance config. An + up-to-date fingerprint must be provided in order + to update an existing per-instance config or the + field needs to be unset. + + This field is a member of `oneof`_ ``_fingerprint``. + name (str): + The name of a per-instance config and its + corresponding instance. Serves as a merge key + during UpdatePerInstanceConfigs operations, that + is, if a per-instance config with the same name + exists then it will be updated, otherwise a new + one will be created for the VM instance with the + same name. An attempt to create a per-instance + config for a VM instance that either doesn't + exist or is not part of the group will result in + an error. + + This field is a member of `oneof`_ ``_name``. + preserved_state (google.cloud.compute_v1.types.PreservedState): + The intended preserved state for the given + instance. Does not contain preserved state + generated from a stateful policy. + + This field is a member of `oneof`_ ``_preserved_state``. + status (str): + The status of applying this per-instance + config on the corresponding managed instance. + Check the Status enum for the list of possible + values. + + This field is a member of `oneof`_ ``_status``. + """ + class Status(proto.Enum): + r"""The status of applying this per-instance config on the + corresponding managed instance. 
+ """ + UNDEFINED_STATUS = 0 + APPLYING = 352003508 + DELETING = 528602024 + EFFECTIVE = 244201863 + NONE = 2402104 + UNAPPLIED = 483935140 + UNAPPLIED_DELETION = 313956873 + + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + preserved_state = proto.Field( + proto.MESSAGE, + number=2634026, + optional=True, + message='PreservedState', + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class Policy(proto.Message): + r"""An Identity and Access Management (IAM) policy, which specifies + access controls for Google Cloud resources. A ``Policy`` is a + collection of ``bindings``. A ``binding`` binds one or more + ``members`` to a single ``role``. Members can be user accounts, + service accounts, Google groups, and domains (such as G Suite). A + ``role`` is a named list of permissions; each ``role`` can be an IAM + predefined role or a user-created custom role. For some types of + Google Cloud resources, a ``binding`` can also specify a + ``condition``, which is a logical expression that allows access to a + resource only if the expression evaluates to ``true``. A condition + can add constraints based on attributes of the request, the + resource, or both. To learn which resources support conditions in + their IAM policies, see the `IAM + documentation `__. 
+ **JSON example:** { "bindings": [ { "role": + "roles/resourcemanager.organizationAdmin", "members": [ + "user:mike@example.com", "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { + "role": "roles/resourcemanager.organizationViewer", "members": [ + "user:eve@example.com" ], "condition": { "title": "expirable + access", "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": + "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - + members: - user:mike@example.com - group:admins@example.com - + domain:google.com - + serviceAccount:my-project-id@appspot.gserviceaccount.com role: + roles/resourcemanager.organizationAdmin - members: - + user:eve@example.com role: roles/resourcemanager.organizationViewer + condition: title: expirable access description: Does not grant + access after Sep 2020 expression: request.time < + timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3 + For a description of IAM and its features, see the `IAM + documentation `__. + + Attributes: + audit_configs (Sequence[google.cloud.compute_v1.types.AuditConfig]): + Specifies cloud audit logging configuration + for this policy. + bindings (Sequence[google.cloud.compute_v1.types.Binding]): + Associates a list of ``members`` to a ``role``. Optionally, + may specify a ``condition`` that determines how and when the + ``bindings`` are applied. Each of the ``bindings`` must + contain at least one member. + etag (str): + ``etag`` is used for optimistic concurrency control as a way + to help prevent simultaneous updates of a policy from + overwriting each other. 
It is strongly suggested that + systems make use of the ``etag`` in the read-modify-write + cycle to perform policy updates in order to avoid race + conditions: An ``etag`` is returned in the response to + ``getIamPolicy``, and systems are expected to put that etag + in the request to ``setIamPolicy`` to ensure that their + change will be applied to the same version of the policy. + **Important:** If you use IAM Conditions, you must include + the ``etag`` field whenever you call ``setIamPolicy``. If + you omit this field, then IAM allows you to overwrite a + version ``3`` policy with a version ``1`` policy, and all of + the conditions in the version ``3`` policy are lost. + + This field is a member of `oneof`_ ``_etag``. + iam_owned (bool): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_iam_owned``. + rules (Sequence[google.cloud.compute_v1.types.Rule]): + This is deprecated and has no effect. Do not + use. + version (int): + Specifies the format of the policy. Valid values are ``0``, + ``1``, and ``3``. Requests that specify an invalid value are + rejected. Any operation that affects conditional role + bindings must specify version ``3``. This requirement + applies to the following operations: \* Getting a policy + that includes a conditional role binding \* Adding a + conditional role binding to a policy \* Changing a + conditional role binding in a policy \* Removing any role + binding, with or without a condition, from a policy that + includes conditions **Important:** If you use IAM + Conditions, you must include the ``etag`` field whenever you + call ``setIamPolicy``. If you omit this field, then IAM + allows you to overwrite a version ``3`` policy with a + version ``1`` policy, and all of the conditions in the + version ``3`` policy are lost. If a policy does not include + any conditions, operations on that policy may specify any + valid version or leave the field unset. 
To learn which + resources support conditions in their IAM policies, see the + `IAM + documentation `__. + + This field is a member of `oneof`_ ``_version``. + """ + + audit_configs = proto.RepeatedField( + proto.MESSAGE, + number=328080653, + message='AuditConfig', + ) + bindings = proto.RepeatedField( + proto.MESSAGE, + number=403251854, + message='Binding', + ) + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + iam_owned = proto.Field( + proto.BOOL, + number=450566203, + optional=True, + ) + rules = proto.RepeatedField( + proto.MESSAGE, + number=108873975, + message='Rule', + ) + version = proto.Field( + proto.INT32, + number=351608024, + optional=True, + ) + + +class PreconfiguredWafSet(proto.Message): + r""" + + Attributes: + expression_sets (Sequence[google.cloud.compute_v1.types.WafExpressionSet]): + List of entities that are currently supported + for WAF rules. + """ + + expression_sets = proto.RepeatedField( + proto.MESSAGE, + number=474011032, + message='WafExpressionSet', + ) + + +class PreservedState(proto.Message): + r"""Preserved state for a given instance. + + Attributes: + disks (Sequence[google.cloud.compute_v1.types.PreservedState.DisksEntry]): + Preserved disks defined for this instance. + This map is keyed with the device names of the + disks. + metadata (Sequence[google.cloud.compute_v1.types.PreservedState.MetadataEntry]): + Preserved metadata defined for this instance. + """ + + disks = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=95594102, + message='PreservedStatePreservedDisk', + ) + metadata = proto.MapField( + proto.STRING, + proto.STRING, + number=86866735, + ) + + +class PreservedStatePreservedDisk(proto.Message): + r""" + + Attributes: + auto_delete (str): + These stateful disks will never be deleted during + autohealing, update, instance recreate operations. This flag + is used to configure if the disk should be deleted after it + is no longer used by the group, e.g. 
when the given instance + or the whole MIG is deleted. Note: disks attached in + READ_ONLY mode cannot be auto-deleted. Check the AutoDelete + enum for the list of possible values. + + This field is a member of `oneof`_ ``_auto_delete``. + mode (str): + The mode in which to attach this disk, either READ_WRITE or + READ_ONLY. If not specified, the default is to attach the + disk in READ_WRITE mode. Check the Mode enum for the list of + possible values. + + This field is a member of `oneof`_ ``_mode``. + source (str): + The URL of the disk resource that is stateful + and should be attached to the VM instance. + + This field is a member of `oneof`_ ``_source``. + """ + class AutoDelete(proto.Enum): + r"""These stateful disks will never be deleted during autohealing, + update, instance recreate operations. This flag is used to configure + if the disk should be deleted after it is no longer used by the + group, e.g. when the given instance or the whole MIG is deleted. + Note: disks attached in READ_ONLY mode cannot be auto-deleted. + """ + UNDEFINED_AUTO_DELETE = 0 + NEVER = 74175084 + ON_PERMANENT_INSTANCE_DELETION = 95727719 + + class Mode(proto.Enum): + r"""The mode in which to attach this disk, either READ_WRITE or + READ_ONLY. If not specified, the default is to attach the disk in + READ_WRITE mode. + """ + UNDEFINED_MODE = 0 + READ_ONLY = 91950261 + READ_WRITE = 173607894 + + auto_delete = proto.Field( + proto.STRING, + number=464761403, + optional=True, + ) + mode = proto.Field( + proto.STRING, + number=3357091, + optional=True, + ) + source = proto.Field( + proto.STRING, + number=177235995, + optional=True, + ) + + +class PreviewRouterRequest(proto.Message): + r"""A request message for Routers.Preview. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + router (str): + Name of the Router resource to query. 
+ router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + router = proto.Field( + proto.STRING, + number=148608841, + ) + router_resource = proto.Field( + proto.MESSAGE, + number=155222084, + message='Router', + ) + + +class Project(proto.Message): + r"""Represents a Project resource. A project is used to organize + resources in a Google Cloud Platform environment. For more + information, read about the Resource Hierarchy. + + Attributes: + common_instance_metadata (google.cloud.compute_v1.types.Metadata): + Metadata key/value pairs available to all + instances contained in this project. See Custom + metadata for more information. + + This field is a member of `oneof`_ ``_common_instance_metadata``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + default_network_tier (str): + This signifies the default network tier used + for configuring resources of the project and can + only take the following values: PREMIUM, + STANDARD. Initially the default network tier is + PREMIUM. Check the DefaultNetworkTier enum for + the list of possible values. + + This field is a member of `oneof`_ ``_default_network_tier``. + default_service_account (str): + [Output Only] Default service account used by VMs running in + this project. + + This field is a member of `oneof`_ ``_default_service_account``. + description (str): + An optional textual description of the + resource. + + This field is a member of `oneof`_ ``_description``. + enabled_features (Sequence[str]): + Restricted features enabled for use on this + project. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. 
This is *not* the + project ID, and is just a unique ID used by Compute Engine + to identify resources. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#project + for projects. + + This field is a member of `oneof`_ ``_kind``. + name (str): + The project ID. For example: my-example- + roject. Use the project ID to make requests to + Compute Engine. + + This field is a member of `oneof`_ ``_name``. + quotas (Sequence[google.cloud.compute_v1.types.Quota]): + [Output Only] Quotas assigned to this project. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + usage_export_location (google.cloud.compute_v1.types.UsageExportLocation): + The naming prefix for daily usage reports and + the Google Cloud Storage bucket where they are + stored. + + This field is a member of `oneof`_ ``_usage_export_location``. + xpn_project_status (str): + [Output Only] The role this project has in a shared VPC + configuration. Currently, only projects with the host role, + which is specified by the value HOST, are differentiated. + Check the XpnProjectStatus enum for the list of possible + values. + + This field is a member of `oneof`_ ``_xpn_project_status``. + """ + class DefaultNetworkTier(proto.Enum): + r"""This signifies the default network tier used for configuring + resources of the project and can only take the following values: + PREMIUM, STANDARD. Initially the default network tier is + PREMIUM. + """ + UNDEFINED_DEFAULT_NETWORK_TIER = 0 + PREMIUM = 399530551 + STANDARD = 484642493 + + class XpnProjectStatus(proto.Enum): + r"""[Output Only] The role this project has in a shared VPC + configuration. Currently, only projects with the host role, which is + specified by the value HOST, are differentiated. 
+ """ + UNDEFINED_XPN_PROJECT_STATUS = 0 + HOST = 2223528 + UNSPECIFIED_XPN_PROJECT_STATUS = 340393257 + + common_instance_metadata = proto.Field( + proto.MESSAGE, + number=185794117, + optional=True, + message='Metadata', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + default_network_tier = proto.Field( + proto.STRING, + number=471753361, + optional=True, + ) + default_service_account = proto.Field( + proto.STRING, + number=298712229, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + enabled_features = proto.RepeatedField( + proto.STRING, + number=469017467, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + quotas = proto.RepeatedField( + proto.MESSAGE, + number=125341947, + message='Quota', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + usage_export_location = proto.Field( + proto.MESSAGE, + number=347543874, + optional=True, + message='UsageExportLocation', + ) + xpn_project_status = proto.Field( + proto.STRING, + number=228419265, + optional=True, + ) + + +class ProjectsDisableXpnResourceRequest(proto.Message): + r""" + + Attributes: + xpn_resource (google.cloud.compute_v1.types.XpnResourceId): + Service resource (a.k.a service project) ID. + + This field is a member of `oneof`_ ``_xpn_resource``. + """ + + xpn_resource = proto.Field( + proto.MESSAGE, + number=133384631, + optional=True, + message='XpnResourceId', + ) + + +class ProjectsEnableXpnResourceRequest(proto.Message): + r""" + + Attributes: + xpn_resource (google.cloud.compute_v1.types.XpnResourceId): + Service resource (a.k.a service project) ID. + + This field is a member of `oneof`_ ``_xpn_resource``. 
+ """ + + xpn_resource = proto.Field( + proto.MESSAGE, + number=133384631, + optional=True, + message='XpnResourceId', + ) + + +class ProjectsGetXpnResources(proto.Message): + r""" + + Attributes: + kind (str): + [Output Only] Type of resource. Always + compute#projectsGetXpnResources for lists of service + resources (a.k.a service projects) + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + resources (Sequence[google.cloud.compute_v1.types.XpnResourceId]): + Service resources (a.k.a service projects) + attached to this project as their shared VPC + host. + """ + + @property + def raw_page(self): + return self + + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + resources = proto.RepeatedField( + proto.MESSAGE, + number=164412965, + message='XpnResourceId', + ) + + +class ProjectsListXpnHostsRequest(proto.Message): + r""" + + Attributes: + organization (str): + Optional organization ID managed by Cloud + Resource Manager, for which to list shared VPC + host projects. If not specified, the + organization will be inferred from the project. + + This field is a member of `oneof`_ ``_organization``. + """ + + organization = proto.Field( + proto.STRING, + number=105180467, + optional=True, + ) + + +class ProjectsSetDefaultNetworkTierRequest(proto.Message): + r""" + + Attributes: + network_tier (str): + Default network tier to be set. + Check the NetworkTier enum for the list of + possible values. 
+ + This field is a member of `oneof`_ ``_network_tier``. + """ + class NetworkTier(proto.Enum): + r"""Default network tier to be set.""" + UNDEFINED_NETWORK_TIER = 0 + PREMIUM = 399530551 + STANDARD = 484642493 + + network_tier = proto.Field( + proto.STRING, + number=517397843, + optional=True, + ) + + +class PublicAdvertisedPrefix(proto.Message): + r"""A public advertised prefix represents an aggregated IP prefix + or netblock which customers bring to cloud. The IP prefix is a + single unit of route advertisement and is announced globally to + the internet. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + dns_verification_ip (str): + The IPv4 address to be used for reverse DNS + verification. + + This field is a member of `oneof`_ ``_dns_verification_ip``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a new + PublicAdvertisedPrefix. An up-to-date + fingerprint must be provided in order to update + the PublicAdvertisedPrefix, otherwise the + request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve a + PublicAdvertisedPrefix. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] The unique identifier for the resource type. + The server generates this identifier. + + This field is a member of `oneof`_ ``_id``. + ip_cidr_range (str): + The IPv4 address range, in CIDR format, + represented by this public advertised prefix. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + kind (str): + [Output Only] Type of the resource. 
Always + compute#publicAdvertisedPrefix for public advertised + prefixes. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + public_delegated_prefixs (Sequence[google.cloud.compute_v1.types.PublicAdvertisedPrefixPublicDelegatedPrefix]): + [Output Only] The list of public delegated prefixes that + exist for this public advertised prefix. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + shared_secret (str): + [Output Only] The shared secret to be used for reverse DNS + verification. + + This field is a member of `oneof`_ ``_shared_secret``. + status (str): + The status of the public advertised prefix. + Check the Status enum for the list of possible + values. + + This field is a member of `oneof`_ ``_status``. 
+ """ + class Status(proto.Enum): + r"""The status of the public advertised prefix.""" + UNDEFINED_STATUS = 0 + INITIAL = 518841124 + PREFIX_CONFIGURATION_COMPLETE = 480889551 + PREFIX_CONFIGURATION_IN_PROGRESS = 378550961 + PREFIX_REMOVAL_IN_PROGRESS = 284375783 + PTR_CONFIGURED = 513497167 + REVERSE_DNS_LOOKUP_FAILED = 295755183 + VALIDATED = 66197998 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + dns_verification_ip = proto.Field( + proto.STRING, + number=241011381, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + public_delegated_prefixs = proto.RepeatedField( + proto.MESSAGE, + number=425811723, + message='PublicAdvertisedPrefixPublicDelegatedPrefix', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + shared_secret = proto.Field( + proto.STRING, + number=381932490, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class PublicAdvertisedPrefixList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.PublicAdvertisedPrefix]): + A list of PublicAdvertisedPrefix resources. + kind (str): + [Output Only] Type of the resource. Always + compute#publicAdvertisedPrefix for public advertised + prefixes. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='PublicAdvertisedPrefix', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class PublicAdvertisedPrefixPublicDelegatedPrefix(proto.Message): + r"""Represents a CIDR range which can be used to assign + addresses. + + Attributes: + ip_range (str): + The IP address range of the public delegated + prefix + + This field is a member of `oneof`_ ``_ip_range``. + name (str): + The name of the public delegated prefix + + This field is a member of `oneof`_ ``_name``. + project (str): + The project number of the public delegated + prefix + + This field is a member of `oneof`_ ``_project``. + region (str): + The region of the public delegated prefix if + it is regional. If absent, the prefix is global. + + This field is a member of `oneof`_ ``_region``. 
+ status (str): + The status of the public delegated prefix. + Possible values are: INITIALIZING: The public + delegated prefix is being initialized and + addresses cannot be created yet. ANNOUNCED: The + public delegated prefix is active. + + This field is a member of `oneof`_ ``_status``. + """ + + ip_range = proto.Field( + proto.STRING, + number=145092645, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class PublicDelegatedPrefix(proto.Message): + r"""A PublicDelegatedPrefix resource represents an IP block + within a PublicAdvertisedPrefix that is configured within a + single cloud scope (global or region). IPs in the block can be + allocated to resources within that scope. Public delegated + prefixes may be further broken up into smaller IP blocks in the + same scope as the parent block. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a new + PublicDelegatedPrefix. An up-to-date fingerprint + must be provided in order to update the + PublicDelegatedPrefix, otherwise the request + will fail with error 412 conditionNotMet. To see + the latest fingerprint, make a get() request to + retrieve a PublicDelegatedPrefix. + + This field is a member of `oneof`_ ``_fingerprint``. 
+ id (int): + [Output Only] The unique identifier for the resource type. + The server generates this identifier. + + This field is a member of `oneof`_ ``_id``. + ip_cidr_range (str): + The IPv4 address range, in CIDR format, + represented by this public delegated prefix. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + is_live_migration (bool): + If true, the prefix will be live migrated. + + This field is a member of `oneof`_ ``_is_live_migration``. + kind (str): + [Output Only] Type of the resource. Always + compute#publicDelegatedPrefix for public delegated prefixes. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + parent_prefix (str): + The URL of parent prefix. Either + PublicAdvertisedPrefix or PublicDelegatedPrefix. + + This field is a member of `oneof`_ ``_parent_prefix``. + public_delegated_sub_prefixs (Sequence[google.cloud.compute_v1.types.PublicDelegatedPrefixPublicDelegatedSubPrefix]): + The list of sub public delegated prefixes + that exist for this public delegated prefix. + region (str): + [Output Only] URL of the region where the public delegated + prefix resides. This field applies only to the region + resource. You must specify this field as part of the HTTP + request URL. It is not settable as a field in the request + body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ status (str): + [Output Only] The status of the public delegated prefix. + Check the Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + """ + class Status(proto.Enum): + r"""[Output Only] The status of the public delegated prefix.""" + UNDEFINED_STATUS = 0 + ANNOUNCED = 365103355 + DELETING = 528602024 + INITIALIZING = 306588749 + READY_TO_ANNOUNCE = 64641265 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + is_live_migration = proto.Field( + proto.BOOL, + number=511823856, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + parent_prefix = proto.Field( + proto.STRING, + number=15233991, + optional=True, + ) + public_delegated_sub_prefixs = proto.RepeatedField( + proto.MESSAGE, + number=188940044, + message='PublicDelegatedPrefixPublicDelegatedSubPrefix', + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class PublicDelegatedPrefixAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.PublicDelegatedPrefixAggregatedList.ItemsEntry]): + A list of PublicDelegatedPrefixesScopedList + resources. + kind (str): + [Output Only] Type of the resource. 
Always + compute#publicDelegatedPrefixAggregatedList for aggregated + lists of public delegated prefixes. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='PublicDelegatedPrefixesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class PublicDelegatedPrefixList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.PublicDelegatedPrefix]): + A list of PublicDelegatedPrefix resources. 
+ kind (str): + [Output Only] Type of the resource. Always + compute#publicDelegatedPrefixList for public delegated + prefixes. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='PublicDelegatedPrefix', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class PublicDelegatedPrefixPublicDelegatedSubPrefix(proto.Message): + r"""Represents a sub PublicDelegatedPrefix. + + Attributes: + delegatee_project (str): + Name of the project scoping this + PublicDelegatedSubPrefix. + + This field is a member of `oneof`_ ``_delegatee_project``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. 
+ ip_cidr_range (str): + The IPv4 address range, in CIDR format, + represented by this sub public delegated prefix. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + is_address (bool): + Whether the sub prefix is delegated to create + Address resources in the delegatee project. + + This field is a member of `oneof`_ ``_is_address``. + name (str): + The name of the sub public delegated prefix. + + This field is a member of `oneof`_ ``_name``. + region (str): + [Output Only] The region of the sub public delegated prefix + if it is regional. If absent, the sub prefix is global. + + This field is a member of `oneof`_ ``_region``. + status (str): + [Output Only] The status of the sub public delegated prefix. + Check the Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + """ + class Status(proto.Enum): + r"""[Output Only] The status of the sub public delegated prefix.""" + UNDEFINED_STATUS = 0 + ACTIVE = 314733318 + INACTIVE = 270421099 + + delegatee_project = proto.Field( + proto.STRING, + number=414860634, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + is_address = proto.Field( + proto.BOOL, + number=352617951, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class PublicDelegatedPrefixesScopedList(proto.Message): + r""" + + Attributes: + public_delegated_prefixes (Sequence[google.cloud.compute_v1.types.PublicDelegatedPrefix]): + [Output Only] A list of PublicDelegatedPrefixes contained in + this scope. 
+ warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of public delegated prefixes when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + public_delegated_prefixes = proto.RepeatedField( + proto.MESSAGE, + number=315261206, + message='PublicDelegatedPrefix', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class Quota(proto.Message): + r"""A quotas entry. + + Attributes: + limit (float): + [Output Only] Quota limit for this metric. + + This field is a member of `oneof`_ ``_limit``. + metric (str): + [Output Only] Name of the quota metric. Check the Metric + enum for the list of possible values. + + This field is a member of `oneof`_ ``_metric``. + owner (str): + [Output Only] Owning resource. This is the resource on which + this quota is applied. + + This field is a member of `oneof`_ ``_owner``. + usage (float): + [Output Only] Current usage of this metric. + + This field is a member of `oneof`_ ``_usage``. 
+ """ + class Metric(proto.Enum): + r"""[Output Only] Name of the quota metric.""" + UNDEFINED_METRIC = 0 + A2_CPUS = 153206585 + AFFINITY_GROUPS = 108303563 + AUTOSCALERS = 471248988 + BACKEND_BUCKETS = 137626846 + BACKEND_SERVICES = 269623753 + C2D_CPUS = 508182517 + C2_CPUS = 317601211 + C3_CPUS = 346230362 + COMMITMENTS = 456141790 + COMMITTED_A2_CPUS = 59330902 + COMMITTED_C2D_CPUS = 282390904 + COMMITTED_C2_CPUS = 223725528 + COMMITTED_C3_CPUS = 252354679 + COMMITTED_CPUS = 292394702 + COMMITTED_E2_CPUS = 388120154 + COMMITTED_LICENSES = 357606869 + COMMITTED_LOCAL_SSD_TOTAL_GB = 308393480 + COMMITTED_MEMORY_OPTIMIZED_CPUS = 489057886 + COMMITTED_N2A_CPUS = 40064304 + COMMITTED_N2D_CPUS = 125951757 + COMMITTED_N2_CPUS = 322589603 + COMMITTED_NVIDIA_A100_GPUS = 375799445 + COMMITTED_NVIDIA_K80_GPUS = 3857188 + COMMITTED_NVIDIA_P100_GPUS = 107528100 + COMMITTED_NVIDIA_P4_GPUS = 347952897 + COMMITTED_NVIDIA_T4_GPUS = 139871237 + COMMITTED_NVIDIA_V100_GPUS = 219562 + COMMITTED_T2D_CPUS = 382266439 + CPUS = 2075595 + CPUS_ALL_REGIONS = 470911149 + DISKS_TOTAL_GB = 353520543 + E2_CPUS = 481995837 + EXTERNAL_NETWORK_LB_FORWARDING_RULES = 374298265 + EXTERNAL_PROTOCOL_FORWARDING_RULES = 63478888 + EXTERNAL_VPN_GATEWAYS = 272457134 + FIREWALLS = 374485843 + FORWARDING_RULES = 432668949 + GLOBAL_INTERNAL_ADDRESSES = 42738332 + GPUS_ALL_REGIONS = 39387177 + HEALTH_CHECKS = 289347502 + IMAGES = 15562360 + INSTANCES = 131337822 + INSTANCE_GROUPS = 355919038 + INSTANCE_GROUP_MANAGERS = 101798192 + INSTANCE_TEMPLATES = 226188271 + INTERCONNECTS = 415204741 + INTERCONNECT_ATTACHMENTS_PER_REGION = 159968086 + INTERCONNECT_ATTACHMENTS_TOTAL_MBPS = 425090419 + INTERCONNECT_TOTAL_GBPS = 285341866 + INTERNAL_ADDRESSES = 197899392 + INTERNAL_TRAFFIC_DIRECTOR_FORWARDING_RULES = 266433668 + IN_PLACE_SNAPSHOTS = 151359133 + IN_USE_ADDRESSES = 402125072 + IN_USE_BACKUP_SCHEDULES = 32786705 + IN_USE_SNAPSHOT_SCHEDULES = 462104083 + LOCAL_SSD_TOTAL_GB = 330878021 + M1_CPUS = 37203366 + 
M2_CPUS = 65832517 + MACHINE_IMAGES = 446986640 + N2A_CPUS = 265855917 + N2D_CPUS = 351743370 + N2_CPUS = 416465286 + NETWORKS = 485481477 + NETWORK_ENDPOINT_GROUPS = 102144909 + NETWORK_FIREWALL_POLICIES = 101117374 + NODE_GROUPS = 24624817 + NODE_TEMPLATES = 474896668 + NVIDIA_A100_GPUS = 504872978 + NVIDIA_K80_GPUS = 163886599 + NVIDIA_P100_GPUS = 236601633 + NVIDIA_P100_VWS_GPUS = 213970574 + NVIDIA_P4_GPUS = 283841470 + NVIDIA_P4_VWS_GPUS = 528296619 + NVIDIA_T4_GPUS = 75759810 + NVIDIA_T4_VWS_GPUS = 319813039 + NVIDIA_V100_GPUS = 129293095 + PACKET_MIRRORINGS = 15578407 + PD_EXTREME_TOTAL_PROVISIONED_IOPS = 69593965 + PREEMPTIBLE_CPUS = 251184841 + PREEMPTIBLE_LOCAL_SSD_GB = 260819336 + PREEMPTIBLE_NVIDIA_A100_GPUS = 68832784 + PREEMPTIBLE_NVIDIA_K80_GPUS = 374960201 + PREEMPTIBLE_NVIDIA_P100_GPUS = 337432351 + PREEMPTIBLE_NVIDIA_P100_VWS_GPUS = 313544076 + PREEMPTIBLE_NVIDIA_P4_GPUS = 429197628 + PREEMPTIBLE_NVIDIA_P4_VWS_GPUS = 252981545 + PREEMPTIBLE_NVIDIA_T4_GPUS = 221115968 + PREEMPTIBLE_NVIDIA_T4_VWS_GPUS = 44497965 + PREEMPTIBLE_NVIDIA_V100_GPUS = 230123813 + PSC_ILB_CONSUMER_FORWARDING_RULES_PER_PRODUCER_NETWORK = 231164291 + PSC_INTERNAL_LB_FORWARDING_RULES = 169005435 + PUBLIC_ADVERTISED_PREFIXES = 471371980 + PUBLIC_DELEGATED_PREFIXES = 532465974 + REGIONAL_AUTOSCALERS = 29363772 + REGIONAL_INSTANCE_GROUP_MANAGERS = 37543696 + RESERVATIONS = 32644647 + RESOURCE_POLICIES = 83955297 + ROUTERS = 493018666 + ROUTES = 275680074 + SECURITY_POLICIES = 189518703 + SECURITY_POLICIES_PER_REGION = 249041734 + SECURITY_POLICY_CEVAL_RULES = 470815689 + SECURITY_POLICY_RULES = 203549225 + SECURITY_POLICY_RULES_PER_REGION = 126510156 + SERVICE_ATTACHMENTS = 471521510 + SNAPSHOTS = 343405327 + SSD_TOTAL_GB = 161732561 + SSL_CERTIFICATES = 378372399 + STATIC_ADDRESSES = 93624049 + STATIC_BYOIP_ADDRESSES = 275809649 + SUBNETWORKS = 421330469 + T2D_CPUS = 71187140 + TARGET_HTTPS_PROXIES = 219522506 + TARGET_HTTP_PROXIES = 164117155 + TARGET_INSTANCES = 284519728 + 
TARGET_POOLS = 348261257 + TARGET_SSL_PROXIES = 159216235 + TARGET_TCP_PROXIES = 182243136 + TARGET_VPN_GATEWAYS = 75029928 + URL_MAPS = 378660743 + VPN_GATEWAYS = 35620282 + VPN_TUNNELS = 104327296 + XPN_SERVICE_PROJECTS = 95191981 + + limit = proto.Field( + proto.DOUBLE, + number=102976443, + optional=True, + ) + metric = proto.Field( + proto.STRING, + number=533067184, + optional=True, + ) + owner = proto.Field( + proto.STRING, + number=106164915, + optional=True, + ) + usage = proto.Field( + proto.DOUBLE, + number=111574433, + optional=True, + ) + + +class RawDisk(proto.Message): + r"""The parameters of the raw disk image. + + Attributes: + container_type (str): + The format used to encode and transmit the + block device, which should be TAR. This is just + a container and transmission format and not a + runtime format. Provided by the client when the + disk image is created. Check the ContainerType + enum for the list of possible values. + + This field is a member of `oneof`_ ``_container_type``. + sha1_checksum (str): + [Deprecated] This field is deprecated. An optional SHA1 + checksum of the disk image before unpackaging provided by + the client when the disk image is created. + + This field is a member of `oneof`_ ``_sha1_checksum``. + source (str): + The full Google Cloud Storage URL where the raw disk image + archive is stored. The following are valid formats for the + URL: - + https://storage.googleapis.com/bucket_name/image_archive_name + - https://storage.googleapis.com/bucket_name/folder_name/ + image_archive_name In order to create an image, you must + provide the full or partial URL of one of the following: - + The rawDisk.source URL - The sourceDisk URL - The + sourceImage URL - The sourceSnapshot URL + + This field is a member of `oneof`_ ``_source``. + """ + class ContainerType(proto.Enum): + r"""The format used to encode and transmit the block device, + which should be TAR. 
This is just a container and transmission + format and not a runtime format. Provided by the client when the + disk image is created. + """ + UNDEFINED_CONTAINER_TYPE = 0 + TAR = 82821 + + container_type = proto.Field( + proto.STRING, + number=318809144, + optional=True, + ) + sha1_checksum = proto.Field( + proto.STRING, + number=314444349, + optional=True, + ) + source = proto.Field( + proto.STRING, + number=177235995, + optional=True, + ) + + +class RecreateInstancesInstanceGroupManagerRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.RecreateInstances. See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. + instance_group_managers_recreate_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersRecreateInstancesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the managed + instance group is located. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_recreate_instances_request_resource = proto.Field( + proto.MESSAGE, + number=21405952, + message='InstanceGroupManagersRecreateInstancesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class RecreateInstancesRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.RecreateInstances. See the method + description for details. + + Attributes: + instance_group_manager (str): + Name of the managed instance group. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + region_instance_group_managers_recreate_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersRecreateRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_managers_recreate_request_resource = proto.Field( + proto.MESSAGE, + number=170999316, + message='RegionInstanceGroupManagersRecreateRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class Reference(proto.Message): + r"""Represents a reference to a resource. + + Attributes: + kind (str): + [Output Only] Type of the resource. Always compute#reference + for references. + + This field is a member of `oneof`_ ``_kind``. + reference_type (str): + A description of the reference type with no implied + semantics. Possible values include: 1. MEMBER_OF + + This field is a member of `oneof`_ ``_reference_type``. + referrer (str): + URL of the resource which refers to the + target. + + This field is a member of `oneof`_ ``_referrer``. + target (str): + URL of the resource to which this reference + points. + + This field is a member of `oneof`_ ``_target``. + """ + + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + reference_type = proto.Field( + proto.STRING, + number=247521198, + optional=True, + ) + referrer = proto.Field( + proto.STRING, + number=351173663, + optional=True, + ) + target = proto.Field( + proto.STRING, + number=192835985, + optional=True, + ) + + +class Region(proto.Message): + r"""Represents a Region resource. A region is a geographical area + where a resource is located. For more information, read Regions + and Zones. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + deprecated (google.cloud.compute_v1.types.DeprecationStatus): + [Output Only] The deprecation status associated with this + region. 
+ + This field is a member of `oneof`_ ``_deprecated``. + description (str): + [Output Only] Textual description of the resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#region + for regions. + + This field is a member of `oneof`_ ``_kind``. + name (str): + [Output Only] Name of the resource. + + This field is a member of `oneof`_ ``_name``. + quotas (Sequence[google.cloud.compute_v1.types.Quota]): + [Output Only] Quotas assigned to this region. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + status (str): + [Output Only] Status of the region, either UP or DOWN. Check + the Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + supports_pzs (bool): + [Output Only] Reserved for future use. + + This field is a member of `oneof`_ ``_supports_pzs``. + zones (Sequence[str]): + [Output Only] A list of zones available in this region, in + the form of resource URLs. 
+ """ + class Status(proto.Enum): + r"""[Output Only] Status of the region, either UP or DOWN.""" + UNDEFINED_STATUS = 0 + DOWN = 2104482 + UP = 2715 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + deprecated = proto.Field( + proto.MESSAGE, + number=515138995, + optional=True, + message='DeprecationStatus', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + quotas = proto.RepeatedField( + proto.MESSAGE, + number=125341947, + message='Quota', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + supports_pzs = proto.Field( + proto.BOOL, + number=83983214, + optional=True, + ) + zones = proto.RepeatedField( + proto.STRING, + number=116085319, + ) + + +class RegionAutoscalerList(proto.Message): + r"""Contains a list of autoscalers. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Autoscaler]): + A list of Autoscaler resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Autoscaler', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RegionDiskTypeList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.DiskType]): + A list of DiskType resources. + kind (str): + [Output Only] Type of resource. Always + compute#regionDiskTypeList for region disk types. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. 
+ + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='DiskType', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RegionDisksAddResourcePoliciesRequest(proto.Message): + r""" + + Attributes: + resource_policies (Sequence[str]): + Resource policies to be added to this disk. + """ + + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + + +class RegionDisksRemoveResourcePoliciesRequest(proto.Message): + r""" + + Attributes: + resource_policies (Sequence[str]): + Resource policies to be removed from this + disk. + """ + + resource_policies = proto.RepeatedField( + proto.STRING, + number=22220385, + ) + + +class RegionDisksResizeRequest(proto.Message): + r""" + + Attributes: + size_gb (int): + The new size of the regional persistent disk, + which is specified in GB. + + This field is a member of `oneof`_ ``_size_gb``. + """ + + size_gb = proto.Field( + proto.INT64, + number=494929369, + optional=True, + ) + + +class RegionInstanceGroupList(proto.Message): + r"""Contains a list of InstanceGroup resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.InstanceGroup]): + A list of InstanceGroup resources. + kind (str): + The resource type. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceGroup', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RegionInstanceGroupManagerDeleteInstanceConfigReq(proto.Message): + r"""RegionInstanceGroupManagers.deletePerInstanceConfigs + + Attributes: + names (Sequence[str]): + The list of instance names for which we want + to delete per-instance configs on this managed + instance group. + """ + + names = proto.RepeatedField( + proto.STRING, + number=104585032, + ) + + +class RegionInstanceGroupManagerList(proto.Message): + r"""Contains a list of managed instance groups. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.InstanceGroupManager]): + A list of InstanceGroupManager resources. + kind (str): + [Output Only] The resource type, which is always + compute#instanceGroupManagerList for a list of managed + instance groups that exist in th regional scope. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceGroupManager', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RegionInstanceGroupManagerPatchInstanceConfigReq(proto.Message): + r"""RegionInstanceGroupManagers.patchPerInstanceConfigs + + Attributes: + per_instance_configs (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + The list of per-instance configs to insert or + patch on this managed instance group. 
+ """ + + per_instance_configs = proto.RepeatedField( + proto.MESSAGE, + number=526265001, + message='PerInstanceConfig', + ) + + +class RegionInstanceGroupManagerUpdateInstanceConfigReq(proto.Message): + r"""RegionInstanceGroupManagers.updatePerInstanceConfigs + + Attributes: + per_instance_configs (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + The list of per-instance configs to insert or + patch on this managed instance group. + """ + + per_instance_configs = proto.RepeatedField( + proto.MESSAGE, + number=526265001, + message='PerInstanceConfig', + ) + + +class RegionInstanceGroupManagersAbandonInstancesRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[str]): + The URLs of one or more instances to abandon. This can be a + full URL or a partial URL, such as + zones/[ZONE]/instances/[INSTANCE_NAME]. + """ + + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + + +class RegionInstanceGroupManagersApplyUpdatesRequest(proto.Message): + r"""RegionInstanceGroupManagers.applyUpdatesToInstances + + Attributes: + all_instances (bool): + Flag to update all instances instead of + specified list of ���instances���. If the flag + is set to true then the instances may not be + specified in the request. + + This field is a member of `oneof`_ ``_all_instances``. + instances (Sequence[str]): + The list of URLs of one or more instances for which you want + to apply updates. Each URL can be a full URL or a partial + URL, such as zones/[ZONE]/instances/[INSTANCE_NAME]. + minimal_action (str): + The minimal action that you want to perform + on each instance during the update: - REPLACE: + At minimum, delete the instance and create it + again. - RESTART: Stop the instance and start it + again. - REFRESH: Do not stop the instance. - + NONE: Do not disrupt the instance at all. By + default, the minimum action is NONE. 
If your + update requires a more disruptive action than + you set with this flag, the necessary action is + performed to execute the update. + + This field is a member of `oneof`_ ``_minimal_action``. + most_disruptive_allowed_action (str): + The most disruptive action that you want to + perform on each instance during the update: - + REPLACE: Delete the instance and create it + again. - RESTART: Stop the instance and start it + again. - REFRESH: Do not stop the instance. - + NONE: Do not disrupt the instance at all. By + default, the most disruptive allowed action is + REPLACE. If your update requires a more + disruptive action than you set with this flag, + the update request will fail. + + This field is a member of `oneof`_ ``_most_disruptive_allowed_action``. + """ + + all_instances = proto.Field( + proto.BOOL, + number=403676512, + optional=True, + ) + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + minimal_action = proto.Field( + proto.STRING, + number=270567060, + optional=True, + ) + most_disruptive_allowed_action = proto.Field( + proto.STRING, + number=66103053, + optional=True, + ) + + +class RegionInstanceGroupManagersCreateInstancesRequest(proto.Message): + r"""RegionInstanceGroupManagers.createInstances + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + [Required] List of specifications of per-instance configs. + """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='PerInstanceConfig', + ) + + +class RegionInstanceGroupManagersDeleteInstancesRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[str]): + The URLs of one or more instances to delete. This can be a + full URL or a partial URL, such as + zones/[ZONE]/instances/[INSTANCE_NAME]. 
+ skip_instances_on_validation_error (bool): + Specifies whether the request should proceed despite the + inclusion of instances that are not members of the group or + that are already in the process of being deleted or + abandoned. If this field is set to ``false`` and such an + instance is specified in the request, the operation fails. + The operation always fails if the request contains a + malformed instance URL or a reference to an instance that + exists in a zone or region other than the group's zone or + region. + + This field is a member of `oneof`_ ``_skip_instances_on_validation_error``. + """ + + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + skip_instances_on_validation_error = proto.Field( + proto.BOOL, + number=40631073, + optional=True, + ) + + +class RegionInstanceGroupManagersListErrorsResponse(proto.Message): + r""" + + Attributes: + items (Sequence[google.cloud.compute_v1.types.InstanceManagedByIgmError]): + [Output Only] The list of errors of the managed instance + group. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + """ + + @property + def raw_page(self): + return self + + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceManagedByIgmError', + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + + +class RegionInstanceGroupManagersListInstanceConfigsResp(proto.Message): + r""" + + Attributes: + items (Sequence[google.cloud.compute_v1.types.PerInstanceConfig]): + [Output Only] The list of PerInstanceConfig. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='PerInstanceConfig', + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RegionInstanceGroupManagersListInstancesResponse(proto.Message): + r""" + + Attributes: + managed_instances (Sequence[google.cloud.compute_v1.types.ManagedInstance]): + A list of managed instances. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ """ + + @property + def raw_page(self): + return self + + managed_instances = proto.RepeatedField( + proto.MESSAGE, + number=336219614, + message='ManagedInstance', + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + + +class RegionInstanceGroupManagersRecreateRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[str]): + The URLs of one or more instances to recreate. This can be a + full URL or a partial URL, such as + zones/[ZONE]/instances/[INSTANCE_NAME]. + """ + + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + + +class RegionInstanceGroupManagersSetTargetPoolsRequest(proto.Message): + r""" + + Attributes: + fingerprint (str): + Fingerprint of the target pools information, + which is a hash of the contents. This field is + used for optimistic locking when you update the + target pool entries. This field is optional. + + This field is a member of `oneof`_ ``_fingerprint``. + target_pools (Sequence[str]): + The URL of all TargetPool resources to which + instances in the instanceGroup field are added. + The target pools automatically apply to all of + the instances in the managed instance group. + """ + + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + target_pools = proto.RepeatedField( + proto.STRING, + number=336072617, + ) + + +class RegionInstanceGroupManagersSetTemplateRequest(proto.Message): + r""" + + Attributes: + instance_template (str): + URL of the InstanceTemplate resource from + which all new instances will be created. + + This field is a member of `oneof`_ ``_instance_template``. + """ + + instance_template = proto.Field( + proto.STRING, + number=309248228, + optional=True, + ) + + +class RegionInstanceGroupsListInstances(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.InstanceWithNamedPorts]): + A list of InstanceWithNamedPorts resources. + kind (str): + The resource type. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='InstanceWithNamedPorts', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RegionInstanceGroupsListInstancesRequest(proto.Message): + r""" + + Attributes: + instance_state (str): + Instances in which state should be returned. + Valid options are: 'ALL', 'RUNNING'. By default, + it lists all instances. Check the InstanceState + enum for the list of possible values. + + This field is a member of `oneof`_ ``_instance_state``. + port_name (str): + Name of port user is interested in. It is + optional. 
If it is set, only information about + this ports will be returned. If it is not set, + all the named ports will be returned. Always + lists all instances. + + This field is a member of `oneof`_ ``_port_name``. + """ + class InstanceState(proto.Enum): + r"""Instances in which state should be returned. Valid options + are: 'ALL', 'RUNNING'. By default, it lists all instances. + """ + UNDEFINED_INSTANCE_STATE = 0 + ALL = 64897 + RUNNING = 121282975 + + instance_state = proto.Field( + proto.STRING, + number=92223591, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + + +class RegionInstanceGroupsSetNamedPortsRequest(proto.Message): + r""" + + Attributes: + fingerprint (str): + The fingerprint of the named ports + information for this instance group. Use this + optional property to prevent conflicts when + multiple users change the named ports settings + concurrently. Obtain the fingerprint with the + instanceGroups.get method. Then, include the + fingerprint in your request to ensure that you + do not overwrite changes that were applied from + another concurrent request. + + This field is a member of `oneof`_ ``_fingerprint``. + named_ports (Sequence[google.cloud.compute_v1.types.NamedPort]): + The list of named ports to set for this + instance group. + """ + + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + named_ports = proto.RepeatedField( + proto.MESSAGE, + number=427598732, + message='NamedPort', + ) + + +class RegionList(proto.Message): + r"""Contains a list of region resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Region]): + A list of Region resources. + kind (str): + [Output Only] Type of resource. Always compute#regionList + for lists of regions. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Region', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RegionSetLabelsRequest(proto.Message): + r""" + + Attributes: + label_fingerprint (str): + The fingerprint of the previous set of labels + for this resource, used to detect conflicts. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash in order to update or + change labels. Make a get() request to the + resource to get the latest fingerprint. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.RegionSetLabelsRequest.LabelsEntry]): + The labels to set for this resource. 
+ """ + + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + + +class RegionSetPolicyRequest(proto.Message): + r""" + + Attributes: + bindings (Sequence[google.cloud.compute_v1.types.Binding]): + Flatten Policy to create a backwacd + compatible wire-format. Deprecated. Use 'policy' + to specify bindings. + etag (str): + Flatten Policy to create a backward + compatible wire-format. Deprecated. Use 'policy' + to specify the etag. + + This field is a member of `oneof`_ ``_etag``. + policy (google.cloud.compute_v1.types.Policy): + REQUIRED: The complete policy to be applied + to the 'resource'. The size of the policy is + limited to a few 10s of KB. An empty policy is + in general a valid policy but certain services + (like Projects) might reject them. + + This field is a member of `oneof`_ ``_policy``. + """ + + bindings = proto.RepeatedField( + proto.MESSAGE, + number=403251854, + message='Binding', + ) + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + policy = proto.Field( + proto.MESSAGE, + number=91071794, + optional=True, + message='Policy', + ) + + +class RegionTargetHttpsProxiesSetSslCertificatesRequest(proto.Message): + r""" + + Attributes: + ssl_certificates (Sequence[str]): + New set of SslCertificate resources to + associate with this TargetHttpsProxy resource. + Currently exactly one SslCertificate resource + must be specified. + """ + + ssl_certificates = proto.RepeatedField( + proto.STRING, + number=366006543, + ) + + +class RegionUrlMapsValidateRequest(proto.Message): + r""" + + Attributes: + resource (google.cloud.compute_v1.types.UrlMap): + Content of the UrlMap to be validated. + + This field is a member of `oneof`_ ``_resource``. 
+ """ + + resource = proto.Field( + proto.MESSAGE, + number=195806222, + optional=True, + message='UrlMap', + ) + + +class RemoveAssociationFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.RemoveAssociation. See + the method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + name (str): + Name for the attachment that will be removed. + + This field is a member of `oneof`_ ``_name``. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class RemoveHealthCheckTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.RemoveHealthCheck. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. 
For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_pool (str): + Name of the target pool to remove health + checks from. + target_pools_remove_health_check_request_resource (google.cloud.compute_v1.types.TargetPoolsRemoveHealthCheckRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + target_pools_remove_health_check_request_resource = proto.Field( + proto.MESSAGE, + number=304985011, + message='TargetPoolsRemoveHealthCheckRequest', + ) + + +class RemoveInstanceTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.RemoveInstance. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_pool (str): + Name of the TargetPool resource to remove + instances from. + target_pools_remove_instance_request_resource (google.cloud.compute_v1.types.TargetPoolsRemoveInstanceRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + target_pools_remove_instance_request_resource = proto.Field( + proto.MESSAGE, + number=29548547, + message='TargetPoolsRemoveInstanceRequest', + ) + + +class RemoveInstancesInstanceGroupRequest(proto.Message): + r"""A request message for InstanceGroups.RemoveInstances. See the + method description for details. + + Attributes: + instance_group (str): + The name of the instance group where the + specified instances will be removed. + instance_groups_remove_instances_request_resource (google.cloud.compute_v1.types.InstanceGroupsRemoveInstancesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the instance group + is located. + """ + + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + instance_groups_remove_instances_request_resource = proto.Field( + proto.MESSAGE, + number=390981817, + message='InstanceGroupsRemoveInstancesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class RemovePeeringNetworkRequest(proto.Message): + r"""A request message for Networks.RemovePeering. See the method + description for details. + + Attributes: + network (str): + Name of the network resource to remove + peering from. + networks_remove_peering_request_resource (google.cloud.compute_v1.types.NetworksRemovePeeringRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + network = proto.Field( + proto.STRING, + number=232872494, + ) + networks_remove_peering_request_resource = proto.Field( + proto.MESSAGE, + number=421162494, + message='NetworksRemovePeeringRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class RemoveResourcePoliciesDiskRequest(proto.Message): + r"""A request message for Disks.RemoveResourcePolicies. See the + method description for details. + + Attributes: + disk (str): + The disk name for this request. + disks_remove_resource_policies_request_resource (google.cloud.compute_v1.types.DisksRemoveResourcePoliciesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + disks_remove_resource_policies_request_resource = proto.Field( + proto.MESSAGE, + number=436756718, + message='DisksRemoveResourcePoliciesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class RemoveResourcePoliciesInstanceRequest(proto.Message): + r"""A request message for Instances.RemoveResourcePolicies. See + the method description for details. + + Attributes: + instance (str): + The instance name for this request. + instances_remove_resource_policies_request_resource (google.cloud.compute_v1.types.InstancesRemoveResourcePoliciesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_remove_resource_policies_request_resource = proto.Field( + proto.MESSAGE, + number=49229558, + message='InstancesRemoveResourcePoliciesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class RemoveResourcePoliciesRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.RemoveResourcePolicies. See + the method description for details. + + Attributes: + disk (str): + The disk name for this request. + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + region_disks_remove_resource_policies_request_resource (google.cloud.compute_v1.types.RegionDisksRemoveResourcePoliciesRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_disks_remove_resource_policies_request_resource = proto.Field( + proto.MESSAGE, + number=8741283, + message='RegionDisksRemoveResourcePoliciesRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class RemoveRuleFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.RemoveRule. See the + method description for details. + + Attributes: + firewall_policy (str): + Name of the firewall policy to update. + priority (int): + The priority of the rule to remove from the + firewall policy. + + This field is a member of `oneof`_ ``_priority``. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall_policy = proto.Field( + proto.STRING, + number=498173265, + ) + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class RemoveRuleSecurityPolicyRequest(proto.Message): + r"""A request message for SecurityPolicies.RemoveRule. See the + method description for details. 
+ + Attributes: + priority (int): + The priority of the rule to remove from the + security policy. + + This field is a member of `oneof`_ ``_priority``. + project (str): + Project ID for this request. + security_policy (str): + Name of the security policy to update. + """ + + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + security_policy = proto.Field( + proto.STRING, + number=171082513, + ) + + +class RequestMirrorPolicy(proto.Message): + r"""A policy that specifies how requests intended for the route's + backends are shadowed to a separate mirrored backend service. + Loadbalancer does not wait for responses from the shadow + service. Prior to sending traffic to the shadow service, the + host / authority header is suffixed with -shadow. + + Attributes: + backend_service (str): + The full or partial URL to the BackendService + resource being mirrored to. + + This field is a member of `oneof`_ ``_backend_service``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + optional=True, + ) + + +class Reservation(proto.Message): + r"""Represents a reservation resource. A reservation ensures that + capacity is held in a specific zone even if the reserved VMs are + not running. For more information, read Reserving zonal + resources. + + Attributes: + commitment (str): + [Output Only] Full or partial URL to a parent commitment. + This field displays for reservations that are tied to a + commitment. + + This field is a member of `oneof`_ ``_commitment``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. 
This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#reservations for reservations. + + This field is a member of `oneof`_ ``_kind``. + name (str): + The name of the resource, provided by the client when + initially creating the resource. The resource name must be + 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + satisfies_pzs (bool): + [Output Only] Reserved for future use. + + This field is a member of `oneof`_ ``_satisfies_pzs``. + self_link (str): + [Output Only] Server-defined fully-qualified URL for this + resource. + + This field is a member of `oneof`_ ``_self_link``. + specific_reservation (google.cloud.compute_v1.types.AllocationSpecificSKUReservation): + Reservation for instances with specific + machine shapes. + + This field is a member of `oneof`_ ``_specific_reservation``. + specific_reservation_required (bool): + Indicates whether the reservation can be + consumed by VMs with affinity for "any" + reservation. If the field is set, then only VMs + that target the reservation by name can consume + from this reservation. + + This field is a member of `oneof`_ ``_specific_reservation_required``. + status (str): + [Output Only] The status of the reservation. Check the + Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + zone (str): + Zone in which the reservation resides. A zone + must be provided if the reservation is created + within a commitment. + + This field is a member of `oneof`_ ``_zone``. 
+ """ + class Status(proto.Enum): + r"""[Output Only] The status of the reservation.""" + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + INVALID = 530283991 + READY = 77848963 + UPDATING = 494614342 + + commitment = proto.Field( + proto.STRING, + number=482134805, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + specific_reservation = proto.Field( + proto.MESSAGE, + number=404901951, + optional=True, + message='AllocationSpecificSKUReservation', + ) + specific_reservation_required = proto.Field( + proto.BOOL, + number=226550687, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class ReservationAffinity(proto.Message): + r"""Specifies the reservations that this instance can consume + from. + + Attributes: + consume_reservation_type (str): + Specifies the type of reservation from which this instance + can consume resources: ANY_RESERVATION (default), + SPECIFIC_RESERVATION, or NO_RESERVATION. See Consuming + reserved instances for examples. Check the + ConsumeReservationType enum for the list of possible values. + + This field is a member of `oneof`_ ``_consume_reservation_type``. + key (str): + Corresponds to the label key of a reservation resource. 
To + target a SPECIFIC_RESERVATION by name, specify + googleapis.com/reservation-name as the key and specify the + name of your reservation as its value. + + This field is a member of `oneof`_ ``_key``. + values (Sequence[str]): + Corresponds to the label values of a + reservation resource. This can be either a name + to a reservation in the same project or + "projects/different-project/reservations/some- + reservation-name" to target a shared reservation + in the same zone but in a different project. + """ + class ConsumeReservationType(proto.Enum): + r"""Specifies the type of reservation from which this instance can + consume resources: ANY_RESERVATION (default), SPECIFIC_RESERVATION, + or NO_RESERVATION. See Consuming reserved instances for examples. + """ + UNDEFINED_CONSUME_RESERVATION_TYPE = 0 + ANY_RESERVATION = 200008121 + NO_RESERVATION = 169322030 + SPECIFIC_RESERVATION = 229889055 + UNSPECIFIED = 526786327 + + consume_reservation_type = proto.Field( + proto.STRING, + number=300736944, + optional=True, + ) + key = proto.Field( + proto.STRING, + number=106079, + optional=True, + ) + values = proto.RepeatedField( + proto.STRING, + number=249928994, + ) + + +class ReservationAggregatedList(proto.Message): + r"""Contains a list of reservations. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.ReservationAggregatedList.ItemsEntry]): + A list of Allocation resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. 
+ + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='ReservationsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ReservationList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Reservation]): + [Output Only] A list of Allocation resources. + kind (str): + [Output Only] Type of resource. Always + compute#reservationsList for lists of reservations + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. 
+ + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Reservation', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ReservationsResizeRequest(proto.Message): + r""" + + Attributes: + specific_sku_count (int): + Number of allocated resources can be resized + with minimum = 1 and maximum = 1000. + + This field is a member of `oneof`_ ``_specific_sku_count``. + """ + + specific_sku_count = proto.Field( + proto.INT64, + number=13890720, + optional=True, + ) + + +class ReservationsScopedList(proto.Message): + r""" + + Attributes: + reservations (Sequence[google.cloud.compute_v1.types.Reservation]): + A list of reservations contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of reservations when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + reservations = proto.RepeatedField( + proto.MESSAGE, + number=399717927, + message='Reservation', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ResetInstanceRequest(proto.Message): + r"""A request message for Instances.Reset. 
See the method + description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ResizeDiskRequest(proto.Message): + r"""A request message for Disks.Resize. See the method + description for details. + + Attributes: + disk (str): + The name of the persistent disk. + disks_resize_request_resource (google.cloud.compute_v1.types.DisksResizeRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + disks_resize_request_resource = proto.Field( + proto.MESSAGE, + number=78307616, + message='DisksResizeRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ResizeInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.Resize. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ size (int): + The number of running instances that the + managed instance group should maintain at any + given time. The group automatically adds or + removes instances to maintain the number of + instances specified by this parameter. + zone (str): + The name of the zone where the managed + instance group is located. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + size = proto.Field( + proto.INT32, + number=3530753, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ResizeRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.Resize. See the method + description for details. + + Attributes: + disk (str): + Name of the regional persistent disk. + project (str): + The project ID for this request. + region (str): + Name of the region for this request. + region_disks_resize_request_resource (google.cloud.compute_v1.types.RegionDisksResizeRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + disk = proto.Field( + proto.STRING, + number=3083677, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_disks_resize_request_resource = proto.Field( + proto.MESSAGE, + number=446633237, + message='RegionDisksResizeRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class ResizeRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for RegionInstanceGroupManagers.Resize. See + the method description for details. + + Attributes: + instance_group_manager (str): + Name of the managed instance group. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + size (int): + Number of instances that should exist in this + instance group manager. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + size = proto.Field( + proto.INT32, + number=3530753, + ) + + +class ResizeReservationRequest(proto.Message): + r"""A request message for Reservations.Resize. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + reservation (str): + Name of the reservation to update. + reservations_resize_request_resource (google.cloud.compute_v1.types.ReservationsResizeRequest): + The body resource for this request + zone (str): + Name of the zone for this request. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + reservation = proto.Field( + proto.STRING, + number=47530956, + ) + reservations_resize_request_resource = proto.Field( + proto.MESSAGE, + number=389262801, + message='ReservationsResizeRequest', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class ResourceCommitment(proto.Message): + r"""Commitment for a particular resource (a Commitment is + composed of one or more of these). + + Attributes: + accelerator_type (str): + Name of the accelerator type resource. + Applicable only when the type is ACCELERATOR. + + This field is a member of `oneof`_ ``_accelerator_type``. + amount (int): + The amount of the resource purchased (in a + type-dependent unit, such as bytes). For vCPUs, + this can just be an integer. For memory, this + must be provided in MB. Memory must be a + multiple of 256 MB, with up to 6.5GB of memory + per every vCPU. + + This field is a member of `oneof`_ ``_amount``. + type_ (str): + Type of resource for which this commitment + applies. Possible values are VCPU and MEMORY + Check the Type enum for the list of possible + values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""Type of resource for which this commitment applies. 
Possible + values are VCPU and MEMORY + """ + UNDEFINED_TYPE = 0 + ACCELERATOR = 429815371 + LOCAL_SSD = 508934896 + MEMORY = 123056385 + UNSPECIFIED = 526786327 + VCPU = 2628978 + + accelerator_type = proto.Field( + proto.STRING, + number=138031246, + optional=True, + ) + amount = proto.Field( + proto.INT64, + number=196759640, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class ResourceGroupReference(proto.Message): + r""" + + Attributes: + group (str): + A URI referencing one of the instance groups + or network endpoint groups listed in the backend + service. + + This field is a member of `oneof`_ ``_group``. + """ + + group = proto.Field( + proto.STRING, + number=98629247, + optional=True, + ) + + +class ResourcePoliciesScopedList(proto.Message): + r""" + + Attributes: + resource_policies (Sequence[google.cloud.compute_v1.types.ResourcePolicy]): + A list of resourcePolicies contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of resourcePolicies when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + resource_policies = proto.RepeatedField( + proto.MESSAGE, + number=22220385, + message='ResourcePolicy', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ResourcePolicy(proto.Message): + r"""Represents a Resource Policy resource. You can use resource + policies to schedule actions for some Compute Engine resources. + For example, you can use them to schedule persistent disk + snapshots. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + + This field is a member of `oneof`_ ``_description``. 
+ group_placement_policy (google.cloud.compute_v1.types.ResourcePolicyGroupPlacementPolicy): + Resource policy for instances for placement + configuration. + + This field is a member of `oneof`_ ``_group_placement_policy``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + instance_schedule_policy (google.cloud.compute_v1.types.ResourcePolicyInstanceSchedulePolicy): + Resource policy for scheduling instance + operations. + + This field is a member of `oneof`_ ``_instance_schedule_policy``. + kind (str): + [Output Only] Type of the resource. Always + compute#resource_policies for resource policies. + + This field is a member of `oneof`_ ``_kind``. + name (str): + The name of the resource, provided by the client when + initially creating the resource. The resource name must be + 1-63 characters long, and comply with RFC1035. Specifically, + the name must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + region (str): + + This field is a member of `oneof`_ ``_region``. + resource_status (google.cloud.compute_v1.types.ResourcePolicyResourceStatus): + [Output Only] The system status of the resource policy. + + This field is a member of `oneof`_ ``_resource_status``. + self_link (str): + [Output Only] Server-defined fully-qualified URL for this + resource. + + This field is a member of `oneof`_ ``_self_link``. + snapshot_schedule_policy (google.cloud.compute_v1.types.ResourcePolicySnapshotSchedulePolicy): + Resource policy for persistent disks for + creating snapshots. + + This field is a member of `oneof`_ ``_snapshot_schedule_policy``. 
+ status (str): + [Output Only] The status of resource policy creation. Check + the Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + """ + class Status(proto.Enum): + r"""[Output Only] The status of resource policy creation.""" + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + EXPIRED = 482489093 + INVALID = 530283991 + READY = 77848963 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + group_placement_policy = proto.Field( + proto.MESSAGE, + number=10931596, + optional=True, + message='ResourcePolicyGroupPlacementPolicy', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + instance_schedule_policy = proto.Field( + proto.MESSAGE, + number=344877104, + optional=True, + message='ResourcePolicyInstanceSchedulePolicy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + resource_status = proto.Field( + proto.MESSAGE, + number=249429315, + optional=True, + message='ResourcePolicyResourceStatus', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + snapshot_schedule_policy = proto.Field( + proto.MESSAGE, + number=218131295, + optional=True, + message='ResourcePolicySnapshotSchedulePolicy', + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class ResourcePolicyAggregatedList(proto.Message): + r"""Contains a list of resourcePolicies. + + Attributes: + etag (str): + + This field is a member of `oneof`_ ``_etag``. + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.ResourcePolicyAggregatedList.ItemsEntry]): + A list of ResourcePolicy resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='ResourcePoliciesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ResourcePolicyDailyCycle(proto.Message): + r"""Time window specified for daily operations. + + Attributes: + days_in_cycle (int): + Defines a schedule with units measured in + days. 
The value determines how many days + pass between the start of each cycle. + + This field is a member of `oneof`_ ``_days_in_cycle``. + duration (str): + [Output only] A predetermined duration for the window, + automatically chosen to be the smallest possible in the + given scenario. + + This field is a member of `oneof`_ ``_duration``. + start_time (str): + Start time of the window. This must be in UTC + format that resolves to one of 00:00, 04:00, + 08:00, 12:00, 16:00, or 20:00. For example, both + 13:00-5 and 08:00 are valid. + + This field is a member of `oneof`_ ``_start_time``. + """ + + days_in_cycle = proto.Field( + proto.INT32, + number=369790004, + optional=True, + ) + duration = proto.Field( + proto.STRING, + number=155471252, + optional=True, + ) + start_time = proto.Field( + proto.STRING, + number=37467274, + optional=True, + ) + + +class ResourcePolicyGroupPlacementPolicy(proto.Message): + r"""A GroupPlacementPolicy specifies resource placement + configuration. It specifies the failure bucket separation as + well as network locality + + Attributes: + availability_domain_count (int): + The number of availability domains instances + will be spread across. If two instances are in + different availability domain, they will not be + put in the same low latency network + + This field is a member of `oneof`_ ``_availability_domain_count``. + collocation (str): + Specifies network collocation + Check the Collocation enum for the list of + possible values. + + This field is a member of `oneof`_ ``_collocation``. + vm_count (int): + Number of vms in this placement group + + This field is a member of `oneof`_ ``_vm_count``. 
+ """ + class Collocation(proto.Enum): + r"""Specifies network collocation""" + UNDEFINED_COLLOCATION = 0 + COLLOCATED = 103257554 + UNSPECIFIED_COLLOCATION = 464308205 + + availability_domain_count = proto.Field( + proto.INT32, + number=12453432, + optional=True, + ) + collocation = proto.Field( + proto.STRING, + number=511156533, + optional=True, + ) + vm_count = proto.Field( + proto.INT32, + number=261463431, + optional=True, + ) + + +class ResourcePolicyHourlyCycle(proto.Message): + r"""Time window specified for hourly operations. + + Attributes: + duration (str): + [Output only] Duration of the time window, automatically + chosen to be smallest possible in the given scenario. + + This field is a member of `oneof`_ ``_duration``. + hours_in_cycle (int): + Defines a schedule with units measured in + hours. The value determines how many hours pass + between the start of each cycle. + + This field is a member of `oneof`_ ``_hours_in_cycle``. + start_time (str): + Time within the window to start the operations. It must be + in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + + This field is a member of `oneof`_ ``_start_time``. + """ + + duration = proto.Field( + proto.STRING, + number=155471252, + optional=True, + ) + hours_in_cycle = proto.Field( + proto.INT32, + number=526763132, + optional=True, + ) + start_time = proto.Field( + proto.STRING, + number=37467274, + optional=True, + ) + + +class ResourcePolicyInstanceSchedulePolicy(proto.Message): + r"""An InstanceSchedulePolicy specifies when and how frequent + certain operations are performed on the instance. + + Attributes: + expiration_time (str): + The expiration time of the schedule. The + timestamp is an RFC3339 string. + + This field is a member of `oneof`_ ``_expiration_time``. + start_time (str): + The start time of the schedule. The timestamp + is an RFC3339 string. + + This field is a member of `oneof`_ ``_start_time``. 
+ time_zone (str): + Specifies the time zone to be used in interpreting + Schedule.schedule. The value of this field must be a time + zone name from the tz database: + http://en.wikipedia.org/wiki/Tz_database. + + This field is a member of `oneof`_ ``_time_zone``. + vm_start_schedule (google.cloud.compute_v1.types.ResourcePolicyInstanceSchedulePolicySchedule): + Specifies the schedule for starting + instances. + + This field is a member of `oneof`_ ``_vm_start_schedule``. + vm_stop_schedule (google.cloud.compute_v1.types.ResourcePolicyInstanceSchedulePolicySchedule): + Specifies the schedule for stopping + instances. + + This field is a member of `oneof`_ ``_vm_stop_schedule``. + """ + + expiration_time = proto.Field( + proto.STRING, + number=230299229, + optional=True, + ) + start_time = proto.Field( + proto.STRING, + number=37467274, + optional=True, + ) + time_zone = proto.Field( + proto.STRING, + number=36848094, + optional=True, + ) + vm_start_schedule = proto.Field( + proto.MESSAGE, + number=17762396, + optional=True, + message='ResourcePolicyInstanceSchedulePolicySchedule', + ) + vm_stop_schedule = proto.Field( + proto.MESSAGE, + number=426242732, + optional=True, + message='ResourcePolicyInstanceSchedulePolicySchedule', + ) + + +class ResourcePolicyInstanceSchedulePolicySchedule(proto.Message): + r"""Schedule for an instance operation. + + Attributes: + schedule (str): + Specifies the frequency for the operation, + using the unix-cron format. + + This field is a member of `oneof`_ ``_schedule``. + """ + + schedule = proto.Field( + proto.STRING, + number=375820951, + optional=True, + ) + + +class ResourcePolicyList(proto.Message): + r""" + + Attributes: + etag (str): + + This field is a member of `oneof`_ ``_etag``. + id (str): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.ResourcePolicy]): + [Output Only] A list of ResourcePolicy resources. + kind (str): + [Output Only] Type of resource. Always + compute#resourcePoliciesList for lists of resourcePolicies + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='ResourcePolicy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ResourcePolicyResourceStatus(proto.Message): + r"""Contains output only fields. Use this sub-message for all + output fields set on ResourcePolicy. The internal structure of + this "status" field should mimic the structure of ResourcePolicy + proto specification. 
+ + Attributes: + instance_schedule_policy (google.cloud.compute_v1.types.ResourcePolicyResourceStatusInstanceSchedulePolicyStatus): + [Output Only] Specifies a set of output values referring to + the instance_schedule_policy system status. This field + should have the same name as corresponding policy field. + + This field is a member of `oneof`_ ``_instance_schedule_policy``. + """ + + instance_schedule_policy = proto.Field( + proto.MESSAGE, + number=344877104, + optional=True, + message='ResourcePolicyResourceStatusInstanceSchedulePolicyStatus', + ) + + +class ResourcePolicyResourceStatusInstanceSchedulePolicyStatus(proto.Message): + r""" + + Attributes: + last_run_start_time (str): + [Output Only] The last time the schedule successfully ran. + The timestamp is an RFC3339 string. + + This field is a member of `oneof`_ ``_last_run_start_time``. + next_run_start_time (str): + [Output Only] The next time the schedule is planned to run. + The actual time might be slightly different. The timestamp + is an RFC3339 string. + + This field is a member of `oneof`_ ``_next_run_start_time``. + """ + + last_run_start_time = proto.Field( + proto.STRING, + number=303069063, + optional=True, + ) + next_run_start_time = proto.Field( + proto.STRING, + number=318642570, + optional=True, + ) + + +class ResourcePolicySnapshotSchedulePolicy(proto.Message): + r"""A snapshot schedule policy specifies when and how frequently + snapshots are to be created for the target disk. Also specifies + how many and how long these scheduled snapshots should be + retained. + + Attributes: + retention_policy (google.cloud.compute_v1.types.ResourcePolicySnapshotSchedulePolicyRetentionPolicy): + Retention policy applied to snapshots created + by this resource policy. + + This field is a member of `oneof`_ ``_retention_policy``. 
+ schedule (google.cloud.compute_v1.types.ResourcePolicySnapshotSchedulePolicySchedule): + A Vm Maintenance Policy specifies what kind + of infrastructure maintenance we are allowed to + perform on this VM and when. Schedule that is + applied to disks covered by this policy. + + This field is a member of `oneof`_ ``_schedule``. + snapshot_properties (google.cloud.compute_v1.types.ResourcePolicySnapshotSchedulePolicySnapshotProperties): + Properties with which snapshots are created + such as labels, encryption keys. + + This field is a member of `oneof`_ ``_snapshot_properties``. + """ + + retention_policy = proto.Field( + proto.MESSAGE, + number=68625779, + optional=True, + message='ResourcePolicySnapshotSchedulePolicyRetentionPolicy', + ) + schedule = proto.Field( + proto.MESSAGE, + number=375820951, + optional=True, + message='ResourcePolicySnapshotSchedulePolicySchedule', + ) + snapshot_properties = proto.Field( + proto.MESSAGE, + number=185371278, + optional=True, + message='ResourcePolicySnapshotSchedulePolicySnapshotProperties', + ) + + +class ResourcePolicySnapshotSchedulePolicyRetentionPolicy(proto.Message): + r"""Policy for retention of scheduled snapshots. + + Attributes: + max_retention_days (int): + Maximum age of the snapshot that is allowed + to be kept. + + This field is a member of `oneof`_ ``_max_retention_days``. + on_source_disk_delete (str): + Specifies the behavior to apply to scheduled + snapshots when the source disk is deleted. Check + the OnSourceDiskDelete enum for the list of + possible values. + + This field is a member of `oneof`_ ``_on_source_disk_delete``. + """ + class OnSourceDiskDelete(proto.Enum): + r"""Specifies the behavior to apply to scheduled snapshots when + the source disk is deleted. 
+ """ + UNDEFINED_ON_SOURCE_DISK_DELETE = 0 + APPLY_RETENTION_POLICY = 535071332 + KEEP_AUTO_SNAPSHOTS = 258925689 + UNSPECIFIED_ON_SOURCE_DISK_DELETE = 239140769 + + max_retention_days = proto.Field( + proto.INT32, + number=324296979, + optional=True, + ) + on_source_disk_delete = proto.Field( + proto.STRING, + number=321955529, + optional=True, + ) + + +class ResourcePolicySnapshotSchedulePolicySchedule(proto.Message): + r"""A schedule for disks where the schedueled operations are + performed. + + Attributes: + daily_schedule (google.cloud.compute_v1.types.ResourcePolicyDailyCycle): + + This field is a member of `oneof`_ ``_daily_schedule``. + hourly_schedule (google.cloud.compute_v1.types.ResourcePolicyHourlyCycle): + + This field is a member of `oneof`_ ``_hourly_schedule``. + weekly_schedule (google.cloud.compute_v1.types.ResourcePolicyWeeklyCycle): + + This field is a member of `oneof`_ ``_weekly_schedule``. + """ + + daily_schedule = proto.Field( + proto.MESSAGE, + number=86159869, + optional=True, + message='ResourcePolicyDailyCycle', + ) + hourly_schedule = proto.Field( + proto.MESSAGE, + number=38328485, + optional=True, + message='ResourcePolicyHourlyCycle', + ) + weekly_schedule = proto.Field( + proto.MESSAGE, + number=359548053, + optional=True, + message='ResourcePolicyWeeklyCycle', + ) + + +class ResourcePolicySnapshotSchedulePolicySnapshotProperties(proto.Message): + r"""Specified snapshot properties for scheduled snapshots created + by this policy. + + Attributes: + chain_name (str): + Chain name that the snapshot is created in. + + This field is a member of `oneof`_ ``_chain_name``. + guest_flush (bool): + Indication to perform a 'guest aware' + snapshot. + + This field is a member of `oneof`_ ``_guest_flush``. + labels (Sequence[google.cloud.compute_v1.types.ResourcePolicySnapshotSchedulePolicySnapshotProperties.LabelsEntry]): + Labels to apply to scheduled snapshots. These + can be later modified by the setLabels method. 
+ Label values may be empty. + storage_locations (Sequence[str]): + Cloud Storage bucket storage location of the + auto snapshot (regional or multi-regional). + """ + + chain_name = proto.Field( + proto.STRING, + number=68644169, + optional=True, + ) + guest_flush = proto.Field( + proto.BOOL, + number=385550813, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + storage_locations = proto.RepeatedField( + proto.STRING, + number=328005274, + ) + + +class ResourcePolicyWeeklyCycle(proto.Message): + r"""Time window specified for weekly operations. + + Attributes: + day_of_weeks (Sequence[google.cloud.compute_v1.types.ResourcePolicyWeeklyCycleDayOfWeek]): + Up to 7 intervals/windows, one for each day + of the week. + """ + + day_of_weeks = proto.RepeatedField( + proto.MESSAGE, + number=257871834, + message='ResourcePolicyWeeklyCycleDayOfWeek', + ) + + +class ResourcePolicyWeeklyCycleDayOfWeek(proto.Message): + r""" + + Attributes: + day (str): + Defines a schedule that runs on specific days + of the week. Specify one or more days. The + following options are available: MONDAY, + TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, + SUNDAY. Check the Day enum for the list of + possible values. + + This field is a member of `oneof`_ ``_day``. + duration (str): + [Output only] Duration of the time window, automatically + chosen to be smallest possible in the given scenario. + + This field is a member of `oneof`_ ``_duration``. + start_time (str): + Time within the window to start the operations. It must be + in format "HH:MM", where HH : [00-23] and MM : [00-00] GMT. + + This field is a member of `oneof`_ ``_start_time``. + """ + class Day(proto.Enum): + r"""Defines a schedule that runs on specific days of the week. + Specify one or more days. The following options are available: + MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY. 
+ """ + UNDEFINED_DAY = 0 + FRIDAY = 471398751 + INVALID = 530283991 + MONDAY = 132310288 + SATURDAY = 279037881 + SUNDAY = 309626320 + THURSDAY = 207198682 + TUESDAY = 277509677 + WEDNESDAY = 422029110 + + day = proto.Field( + proto.STRING, + number=99228, + optional=True, + ) + duration = proto.Field( + proto.STRING, + number=155471252, + optional=True, + ) + start_time = proto.Field( + proto.STRING, + number=37467274, + optional=True, + ) + + +class Route(proto.Message): + r"""Represents a Route resource. A route defines a path from VM + instances in the VPC network to a specific destination. This + destination can be inside or outside the VPC network. For more + information, read the Routes overview. + + Attributes: + as_paths (Sequence[google.cloud.compute_v1.types.RouteAsPath]): + [Output Only] AS path. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this field when you create the resource. + + This field is a member of `oneof`_ ``_description``. + dest_range (str): + The destination range of outgoing packets + that this route applies to. Both IPv4 and IPv6 + are supported. + + This field is a member of `oneof`_ ``_dest_range``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of this resource. Always compute#routes + for Route resources. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?``. 
The first character must be + a lowercase letter, and all following characters (except for + the last character) must be a dash, lowercase letter, or + digit. The last character must be a lowercase letter or + digit. + + This field is a member of `oneof`_ ``_name``. + network (str): + Fully-qualified URL of the network that this + route applies to. + + This field is a member of `oneof`_ ``_network``. + next_hop_gateway (str): + The URL to a gateway that should handle + matching packets. You can only specify the + internet gateway using a full or partial valid + URL: projects/ project/global/gateways/default- + internet-gateway + + This field is a member of `oneof`_ ``_next_hop_gateway``. + next_hop_ilb (str): + The URL to a forwarding rule of type + loadBalancingScheme=INTERNAL that should handle + matching packets or the IP address of the + forwarding Rule. For example, the following are + all valid URLs: - 10.128.0.56 - + https://www.googleapis.com/compute/v1/projects/project/regions/region + /forwardingRules/forwardingRule - + regions/region/forwardingRules/forwardingRule + + This field is a member of `oneof`_ ``_next_hop_ilb``. + next_hop_instance (str): + The URL to an instance that should handle + matching packets. You can specify this as a full + or partial URL. For example: + https://www.googleapis.com/compute/v1/projects/project/zones/zone/instances/ + + This field is a member of `oneof`_ ``_next_hop_instance``. + next_hop_ip (str): + The network IP address of an instance that + should handle matching packets. Only IPv4 is + supported. + + This field is a member of `oneof`_ ``_next_hop_ip``. + next_hop_network (str): + The URL of the local network if it should + handle matching packets. + + This field is a member of `oneof`_ ``_next_hop_network``. + next_hop_peering (str): + [Output Only] The network peering name that should handle + matching packets, which should conform to RFC1035. + + This field is a member of `oneof`_ ``_next_hop_peering``. 
+ next_hop_vpn_tunnel (str): + The URL to a VpnTunnel that should handle + matching packets. + + This field is a member of `oneof`_ ``_next_hop_vpn_tunnel``. + priority (int): + The priority of this route. Priority is used to break ties + in cases where there is more than one matching route of + equal prefix length. In cases where multiple routes have + equal prefix length, the one with the lowest-numbered + priority value wins. The default value is ``1000``. The + priority value must be from ``0`` to ``65535``, inclusive. + + This field is a member of `oneof`_ ``_priority``. + route_type (str): + [Output Only] The type of this route, which can be one of + the following values: - 'TRANSIT' for a transit route that + this router learned from another Cloud Router and will + readvertise to one of its BGP peers - 'SUBNET' for a route + from a subnet of the VPC - 'BGP' for a route learned from a + BGP peer of this router - 'STATIC' for a static route Check + the RouteType enum for the list of possible values. + + This field is a member of `oneof`_ ``_route_type``. + self_link (str): + [Output Only] Server-defined fully-qualified URL for this + resource. + + This field is a member of `oneof`_ ``_self_link``. + tags (Sequence[str]): + A list of instance tags to which this route + applies. + warnings (Sequence[google.cloud.compute_v1.types.Warnings]): + [Output Only] If potential misconfigurations are detected + for this route, this field will be populated with warning + messages. 
+ """ + class RouteType(proto.Enum): + r"""[Output Only] The type of this route, which can be one of the + following values: - 'TRANSIT' for a transit route that this router + learned from another Cloud Router and will readvertise to one of its + BGP peers - 'SUBNET' for a route from a subnet of the VPC - 'BGP' + for a route learned from a BGP peer of this router - 'STATIC' for a + static route + """ + UNDEFINED_ROUTE_TYPE = 0 + BGP = 65707 + STATIC = 308331118 + SUBNET = 309278557 + TRANSIT = 187793843 + + as_paths = proto.RepeatedField( + proto.MESSAGE, + number=137568929, + message='RouteAsPath', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + dest_range = proto.Field( + proto.STRING, + number=381327712, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + next_hop_gateway = proto.Field( + proto.STRING, + number=377175298, + optional=True, + ) + next_hop_ilb = proto.Field( + proto.STRING, + number=198679901, + optional=True, + ) + next_hop_instance = proto.Field( + proto.STRING, + number=393508247, + optional=True, + ) + next_hop_ip = proto.Field( + proto.STRING, + number=110319529, + optional=True, + ) + next_hop_network = proto.Field( + proto.STRING, + number=262295788, + optional=True, + ) + next_hop_peering = proto.Field( + proto.STRING, + number=412682750, + optional=True, + ) + next_hop_vpn_tunnel = proto.Field( + proto.STRING, + number=519844501, + optional=True, + ) + priority = proto.Field( + proto.UINT32, + number=445151652, + optional=True, + ) + route_type = proto.Field( + proto.STRING, + number=375888752, + optional=True, + ) + self_link = 
proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + tags = proto.RepeatedField( + proto.STRING, + number=3552281, + ) + warnings = proto.RepeatedField( + proto.MESSAGE, + number=498091095, + message='Warnings', + ) + + +class RouteAsPath(proto.Message): + r""" + + Attributes: + as_lists (Sequence[int]): + [Output Only] The AS numbers of the AS Path. + path_segment_type (str): + [Output Only] The type of the AS Path, which can be one of + the following values: - 'AS_SET': unordered set of + autonomous systems that the route in has traversed - + 'AS_SEQUENCE': ordered set of autonomous systems that the + route has traversed - 'AS_CONFED_SEQUENCE': ordered set of + Member Autonomous Systems in the local confederation that + the route has traversed - 'AS_CONFED_SET': unordered set of + Member Autonomous Systems in the local confederation that + the route has traversed Check the PathSegmentType enum for + the list of possible values. + + This field is a member of `oneof`_ ``_path_segment_type``. + """ + class PathSegmentType(proto.Enum): + r"""[Output Only] The type of the AS Path, which can be one of the + following values: - 'AS_SET': unordered set of autonomous systems + that the route in has traversed - 'AS_SEQUENCE': ordered set of + autonomous systems that the route has traversed - + 'AS_CONFED_SEQUENCE': ordered set of Member Autonomous Systems in + the local confederation that the route has traversed - + 'AS_CONFED_SET': unordered set of Member Autonomous Systems in the + local confederation that the route has traversed + """ + UNDEFINED_PATH_SEGMENT_TYPE = 0 + AS_CONFED_SEQUENCE = 222152624 + AS_CONFED_SET = 374040307 + AS_SEQUENCE = 106735918 + AS_SET = 329846453 + + as_lists = proto.RepeatedField( + proto.UINT32, + number=134112584, + ) + path_segment_type = proto.Field( + proto.STRING, + number=513464992, + optional=True, + ) + + +class RouteList(proto.Message): + r"""Contains a list of Route resources. 
+ + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Route]): + A list of Route resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Route', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class Router(proto.Message): + r"""Represents a Cloud Router resource. For more information + about Cloud Router, read the Cloud Router overview. + + Attributes: + bgp (google.cloud.compute_v1.types.RouterBgp): + BGP information specific to this router. + + This field is a member of `oneof`_ ``_bgp``. 
+ bgp_peers (Sequence[google.cloud.compute_v1.types.RouterBgpPeer]): + BGP information that must be configured into + the routing stack to establish BGP peering. This + information must specify the peer ASN and either + the interface name, IP address, or peer IP + address. Please refer to RFC4273. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + encrypted_interconnect_router (bool): + Indicates if a router is dedicated for use + with encrypted VLAN attachments + (interconnectAttachments). Not currently + available publicly. + + This field is a member of `oneof`_ ``_encrypted_interconnect_router``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + interfaces (Sequence[google.cloud.compute_v1.types.RouterInterface]): + Router interfaces. Each interface requires + either one linked resource, (for example, + linkedVpnTunnel), or IP address and IP address + range (for example, ipRange), or both. + kind (str): + [Output Only] Type of resource. Always compute#router for + routers. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. 
+ nats (Sequence[google.cloud.compute_v1.types.RouterNat]): + A list of NAT services created in this + router. + network (str): + URI of the network to which this router + belongs. + + This field is a member of `oneof`_ ``_network``. + region (str): + [Output Only] URI of the region where the router resides. + You must specify this field as part of the HTTP request URL. + It is not settable as a field in the request body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + """ + + bgp = proto.Field( + proto.MESSAGE, + number=97483, + optional=True, + message='RouterBgp', + ) + bgp_peers = proto.RepeatedField( + proto.MESSAGE, + number=452695773, + message='RouterBgpPeer', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + encrypted_interconnect_router = proto.Field( + proto.BOOL, + number=297996575, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + interfaces = proto.RepeatedField( + proto.MESSAGE, + number=12073562, + message='RouterInterface', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + nats = proto.RepeatedField( + proto.MESSAGE, + number=3373938, + message='RouterNat', + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + + +class RouterAdvertisedIpRange(proto.Message): + r"""Description-tagged IP ranges for the router to advertise. + + Attributes: + description (str): + User-specified description for the IP range. 
+ + This field is a member of `oneof`_ ``_description``. + range_ (str): + The IP range to advertise. The value must be + a CIDR-formatted string. + + This field is a member of `oneof`_ ``_range``. + """ + + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + range_ = proto.Field( + proto.STRING, + number=108280125, + optional=True, + ) + + +class RouterAggregatedList(proto.Message): + r"""Contains a list of routers. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.RouterAggregatedList.ItemsEntry]): + A list of Router resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='RoutersScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RouterBgp(proto.Message): + r""" + + Attributes: + advertise_mode (str): + User-specified flag to indicate which mode to + use for advertisement. The options are DEFAULT + or CUSTOM. Check the AdvertiseMode enum for the + list of possible values. + + This field is a member of `oneof`_ ``_advertise_mode``. + advertised_groups (Sequence[str]): + User-specified list of prefix groups to advertise in custom + mode. This field can only be populated if advertise_mode is + CUSTOM and is advertised to all peers of the router. These + groups will be advertised in addition to any specified + prefixes. Leave this field blank to advertise no custom + groups. Check the AdvertisedGroups enum for the list of + possible values. + advertised_ip_ranges (Sequence[google.cloud.compute_v1.types.RouterAdvertisedIpRange]): + User-specified list of individual IP ranges to advertise in + custom mode. This field can only be populated if + advertise_mode is CUSTOM and is advertised to all peers of + the router. These IP ranges will be advertised in addition + to any specified groups. Leave this field blank to advertise + no custom IP ranges. + asn (int): + Local BGP Autonomous System Number (ASN). + Must be an RFC6996 private ASN, either 16-bit or + 32-bit. The value will be fixed for this router + resource. 
All VPN tunnels that link to this + router will have the same local ASN. + + This field is a member of `oneof`_ ``_asn``. + keepalive_interval (int): + The interval in seconds between BGP keepalive + messages that are sent to the peer. Hold time is + three times the interval at which keepalive + messages are sent, and the hold time is the + maximum number of seconds allowed to elapse + between successive keepalive messages that BGP + receives from a peer. BGP will use the smaller + of either the local hold time value or the + peer's hold time value as the hold time for the + BGP connection between the two peers. If set, + this value must be between 20 and 60. The + default is 20. + + This field is a member of `oneof`_ ``_keepalive_interval``. + """ + class AdvertiseMode(proto.Enum): + r"""User-specified flag to indicate which mode to use for + advertisement. The options are DEFAULT or CUSTOM. + """ + UNDEFINED_ADVERTISE_MODE = 0 + CUSTOM = 388595569 + DEFAULT = 115302945 + + class AdvertisedGroups(proto.Enum): + r"""""" + UNDEFINED_ADVERTISED_GROUPS = 0 + ALL_SUBNETS = 3622872 + + advertise_mode = proto.Field( + proto.STRING, + number=312134331, + optional=True, + ) + advertised_groups = proto.RepeatedField( + proto.STRING, + number=21065526, + ) + advertised_ip_ranges = proto.RepeatedField( + proto.MESSAGE, + number=35449932, + message='RouterAdvertisedIpRange', + ) + asn = proto.Field( + proto.UINT32, + number=96892, + optional=True, + ) + keepalive_interval = proto.Field( + proto.UINT32, + number=276771516, + optional=True, + ) + + +class RouterBgpPeer(proto.Message): + r""" + + Attributes: + advertise_mode (str): + User-specified flag to indicate which mode to + use for advertisement. Check the AdvertiseMode + enum for the list of possible values. + + This field is a member of `oneof`_ ``_advertise_mode``. 
+ advertised_groups (Sequence[str]): + User-specified list of prefix groups to advertise in custom + mode, which can take one of the following options: - + ALL_SUBNETS: Advertises all available subnets, including + peer VPC subnets. - ALL_VPC_SUBNETS: Advertises the router's + own VPC subnets. Note that this field can only be populated + if advertise_mode is CUSTOM and overrides the list defined + for the router (in the "bgp" message). These groups are + advertised in addition to any specified prefixes. Leave this + field blank to advertise no custom groups. Check the + AdvertisedGroups enum for the list of possible values. + advertised_ip_ranges (Sequence[google.cloud.compute_v1.types.RouterAdvertisedIpRange]): + User-specified list of individual IP ranges to advertise in + custom mode. This field can only be populated if + advertise_mode is CUSTOM and overrides the list defined for + the router (in the "bgp" message). These IP ranges are + advertised in addition to any specified groups. Leave this + field blank to advertise no custom IP ranges. + advertised_route_priority (int): + The priority of routes advertised to this BGP + peer. Where there is more than one matching + route of maximum length, the routes with the + lowest priority value win. + + This field is a member of `oneof`_ ``_advertised_route_priority``. + bfd (google.cloud.compute_v1.types.RouterBgpPeerBfd): + BFD configuration for the BGP peering. + + This field is a member of `oneof`_ ``_bfd``. + enable (str): + The status of the BGP peer connection. If set + to FALSE, any active session with the peer is + terminated and all associated routing + information is removed. If set to TRUE, the peer + connection can be established with routing + information. The default is TRUE. Check the + Enable enum for the list of possible values. + + This field is a member of `oneof`_ ``_enable``. + interface_name (str): + Name of the interface the BGP peer is + associated with. 
+ + This field is a member of `oneof`_ ``_interface_name``. + ip_address (str): + IP address of the interface inside Google + Cloud Platform. Only IPv4 is supported. + + This field is a member of `oneof`_ ``_ip_address``. + management_type (str): + [Output Only] The resource that configures and manages this + BGP peer. - MANAGED_BY_USER is the default value and can be + managed by you or other users - MANAGED_BY_ATTACHMENT is a + BGP peer that is configured and managed by Cloud + Interconnect, specifically by an InterconnectAttachment of + type PARTNER. Google automatically creates, updates, and + deletes this type of BGP peer when the PARTNER + InterconnectAttachment is created, updated, or deleted. + Check the ManagementType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_management_type``. + name (str): + Name of this BGP peer. The name must be 1-63 characters + long, and comply with RFC1035. Specifically, the name must + be 1-63 characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + peer_asn (int): + Peer BGP Autonomous System Number (ASN). Each + BGP interface may use a different value. + + This field is a member of `oneof`_ ``_peer_asn``. + peer_ip_address (str): + IP address of the BGP interface outside + Google Cloud Platform. Only IPv4 is supported. + + This field is a member of `oneof`_ ``_peer_ip_address``. + router_appliance_instance (str): + URI of the VM instance that is used as third- + party router appliances such as Next Gen + Firewalls, Virtual Routers, or Router + Appliances. The VM instance must be located in + zones contained in the same region as this Cloud + Router. The VM instance is the peer side of the + BGP session. 
+ + This field is a member of `oneof`_ ``_router_appliance_instance``. + """ + class AdvertiseMode(proto.Enum): + r"""User-specified flag to indicate which mode to use for + advertisement. + """ + UNDEFINED_ADVERTISE_MODE = 0 + CUSTOM = 388595569 + DEFAULT = 115302945 + + class AdvertisedGroups(proto.Enum): + r"""""" + UNDEFINED_ADVERTISED_GROUPS = 0 + ALL_SUBNETS = 3622872 + + class Enable(proto.Enum): + r"""The status of the BGP peer connection. If set to FALSE, any + active session with the peer is terminated and all associated + routing information is removed. If set to TRUE, the peer + connection can be established with routing information. The + default is TRUE. + """ + UNDEFINED_ENABLE = 0 + FALSE = 66658563 + TRUE = 2583950 + + class ManagementType(proto.Enum): + r"""[Output Only] The resource that configures and manages this BGP + peer. - MANAGED_BY_USER is the default value and can be managed by + you or other users - MANAGED_BY_ATTACHMENT is a BGP peer that is + configured and managed by Cloud Interconnect, specifically by an + InterconnectAttachment of type PARTNER. Google automatically + creates, updates, and deletes this type of BGP peer when the PARTNER + InterconnectAttachment is created, updated, or deleted. 
+ """ + UNDEFINED_MANAGEMENT_TYPE = 0 + MANAGED_BY_ATTACHMENT = 458926411 + MANAGED_BY_USER = 317294067 + + advertise_mode = proto.Field( + proto.STRING, + number=312134331, + optional=True, + ) + advertised_groups = proto.RepeatedField( + proto.STRING, + number=21065526, + ) + advertised_ip_ranges = proto.RepeatedField( + proto.MESSAGE, + number=35449932, + message='RouterAdvertisedIpRange', + ) + advertised_route_priority = proto.Field( + proto.UINT32, + number=186486332, + optional=True, + ) + bfd = proto.Field( + proto.MESSAGE, + number=97440, + optional=True, + message='RouterBgpPeerBfd', + ) + enable = proto.Field( + proto.STRING, + number=311764355, + optional=True, + ) + interface_name = proto.Field( + proto.STRING, + number=437854673, + optional=True, + ) + ip_address = proto.Field( + proto.STRING, + number=406272220, + optional=True, + ) + management_type = proto.Field( + proto.STRING, + number=173703606, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + peer_asn = proto.Field( + proto.UINT32, + number=69573151, + optional=True, + ) + peer_ip_address = proto.Field( + proto.STRING, + number=207735769, + optional=True, + ) + router_appliance_instance = proto.Field( + proto.STRING, + number=468312989, + optional=True, + ) + + +class RouterBgpPeerBfd(proto.Message): + r""" + + Attributes: + min_receive_interval (int): + The minimum interval, in milliseconds, + between BFD control packets received from the + peer router. The actual value is negotiated + between the two routers and is equal to the + greater of this value and the transmit interval + of the other router. If set, this value must be + between 1000 and 30000. The default is 1000. + + This field is a member of `oneof`_ ``_min_receive_interval``. + min_transmit_interval (int): + The minimum interval, in milliseconds, + between BFD control packets transmitted to the + peer router. 
The actual value is negotiated + between the two routers and is equal to the + greater of this value and the corresponding + receive interval of the other router. If set, + this value must be between 1000 and 30000. The + default is 1000. + + This field is a member of `oneof`_ ``_min_transmit_interval``. + multiplier (int): + The number of consecutive BFD packets that + must be missed before BFD declares that a peer + is unavailable. If set, the value must be a + value between 5 and 16. The default is 5. + + This field is a member of `oneof`_ ``_multiplier``. + session_initialization_mode (str): + The BFD session initialization mode for this + BGP peer. If set to ACTIVE, the Cloud Router + will initiate the BFD session for this BGP peer. + If set to PASSIVE, the Cloud Router will wait + for the peer router to initiate the BFD session + for this BGP peer. If set to DISABLED, BFD is + disabled for this BGP peer. The default is + PASSIVE. Check the SessionInitializationMode + enum for the list of possible values. + + This field is a member of `oneof`_ ``_session_initialization_mode``. + """ + class SessionInitializationMode(proto.Enum): + r"""The BFD session initialization mode for this BGP peer. If set + to ACTIVE, the Cloud Router will initiate the BFD session for + this BGP peer. If set to PASSIVE, the Cloud Router will wait for + the peer router to initiate the BFD session for this BGP peer. + If set to DISABLED, BFD is disabled for this BGP peer. The + default is PASSIVE. 
+ """ + UNDEFINED_SESSION_INITIALIZATION_MODE = 0 + ACTIVE = 314733318 + DISABLED = 516696700 + PASSIVE = 462813959 + + min_receive_interval = proto.Field( + proto.UINT32, + number=186981614, + optional=True, + ) + min_transmit_interval = proto.Field( + proto.UINT32, + number=523282631, + optional=True, + ) + multiplier = proto.Field( + proto.UINT32, + number=191331777, + optional=True, + ) + session_initialization_mode = proto.Field( + proto.STRING, + number=105957049, + optional=True, + ) + + +class RouterInterface(proto.Message): + r""" + + Attributes: + ip_range (str): + IP address and range of the interface. The IP + range must be in the RFC3927 link-local IP + address space. The value must be a CIDR- + formatted string, for example: 169.254.0.1/30. + NOTE: Do not truncate the address as it + represents the IP address of the interface. + + This field is a member of `oneof`_ ``_ip_range``. + linked_interconnect_attachment (str): + URI of the linked Interconnect attachment. It + must be in the same region as the router. Each + interface can have one linked resource, which + can be a VPN tunnel, an Interconnect attachment, + or a virtual machine instance. + + This field is a member of `oneof`_ ``_linked_interconnect_attachment``. + linked_vpn_tunnel (str): + URI of the linked VPN tunnel, which must be + in the same region as the router. Each interface + can have one linked resource, which can be a VPN + tunnel, an Interconnect attachment, or a virtual + machine instance. + + This field is a member of `oneof`_ ``_linked_vpn_tunnel``. + management_type (str): + [Output Only] The resource that configures and manages this + interface. - MANAGED_BY_USER is the default value and can be + managed directly by users. - MANAGED_BY_ATTACHMENT is an + interface that is configured and managed by Cloud + Interconnect, specifically, by an InterconnectAttachment of + type PARTNER. 
Google automatically creates, updates, and + deletes this type of interface when the PARTNER + InterconnectAttachment is created, updated, or deleted. + Check the ManagementType enum for the list of possible + values. + + This field is a member of `oneof`_ ``_management_type``. + name (str): + Name of this interface entry. The name must be 1-63 + characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + private_ip_address (str): + The regional private internal IP address that + is used to establish BGP sessions to a VM + instance acting as a third-party Router + Appliance, such as a Next Gen Firewall, a + Virtual Router, or an SD-WAN VM. + + This field is a member of `oneof`_ ``_private_ip_address``. + redundant_interface (str): + Name of the interface that will be redundant with the + current interface you are creating. The redundantInterface + must belong to the same Cloud Router as the interface here. + To establish the BGP session to a Router Appliance VM, you + must create two BGP peers. The two BGP peers must be + attached to two separate interfaces that are redundant with + each other. The redundant_interface must be 1-63 characters + long, and comply with RFC1035. Specifically, the + redundant_interface must be 1-63 characters long and match + the regular expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which + means the first character must be a lowercase letter, and + all following characters must be a dash, lowercase letter, + or digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_redundant_interface``. 
+ subnetwork (str): + The URI of the subnetwork resource that this + interface belongs to, which must be in the same + region as the Cloud Router. When you establish a + BGP session to a VM instance using this + interface, the VM instance must belong to the + same subnetwork as the subnetwork specified + here. + + This field is a member of `oneof`_ ``_subnetwork``. + """ + class ManagementType(proto.Enum): + r"""[Output Only] The resource that configures and manages this + interface. - MANAGED_BY_USER is the default value and can be managed + directly by users. - MANAGED_BY_ATTACHMENT is an interface that is + configured and managed by Cloud Interconnect, specifically, by an + InterconnectAttachment of type PARTNER. Google automatically + creates, updates, and deletes this type of interface when the + PARTNER InterconnectAttachment is created, updated, or deleted. + """ + UNDEFINED_MANAGEMENT_TYPE = 0 + MANAGED_BY_ATTACHMENT = 458926411 + MANAGED_BY_USER = 317294067 + + ip_range = proto.Field( + proto.STRING, + number=145092645, + optional=True, + ) + linked_interconnect_attachment = proto.Field( + proto.STRING, + number=501085518, + optional=True, + ) + linked_vpn_tunnel = proto.Field( + proto.STRING, + number=352296953, + optional=True, + ) + management_type = proto.Field( + proto.STRING, + number=173703606, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + private_ip_address = proto.Field( + proto.STRING, + number=100854040, + optional=True, + ) + redundant_interface = proto.Field( + proto.STRING, + number=523187303, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + optional=True, + ) + + +class RouterList(proto.Message): + r"""Contains a list of Router resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.Router]): + A list of Router resources. + kind (str): + [Output Only] Type of resource. Always compute#router for + routers. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Router', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class RouterNat(proto.Message): + r"""Represents a Nat resource. It enables the VMs within the + specified subnetworks to access Internet without external IP + addresses. It specifies a list of subnetworks (and the ranges + within) that want to use NAT. Customers can also provide the + external IPs that would be used for NAT. GCP would auto-allocate + ephemeral IPs if no external IPs are provided. 
+ + Attributes: + drain_nat_ips (Sequence[str]): + A list of URLs of the IP resources to be + drained. These IPs must be valid static external + IPs that have been assigned to the NAT. These + IPs should be used for updating/patching a NAT + only. + enable_endpoint_independent_mapping (bool): + + This field is a member of `oneof`_ ``_enable_endpoint_independent_mapping``. + icmp_idle_timeout_sec (int): + Timeout (in seconds) for ICMP connections. + Defaults to 30s if not set. + + This field is a member of `oneof`_ ``_icmp_idle_timeout_sec``. + log_config (google.cloud.compute_v1.types.RouterNatLogConfig): + Configure logging on this NAT. + + This field is a member of `oneof`_ ``_log_config``. + min_ports_per_vm (int): + Minimum number of ports allocated to a VM + from this NAT config. If not set, a default + number of ports is allocated to a VM. This is + rounded up to the nearest power of 2. For + example, if the value of this field is 50, at + least 64 ports are allocated to a VM. + + This field is a member of `oneof`_ ``_min_ports_per_vm``. + name (str): + Unique name of this Nat service. The name + must be 1-63 characters long and comply with + RFC1035. + + This field is a member of `oneof`_ ``_name``. + nat_ip_allocate_option (str): + Specify the NatIpAllocateOption, which can take one of the + following values: - MANUAL_ONLY: Uses only Nat IP addresses + provided by customers. When there are not enough specified + Nat IPs, the Nat service fails for new VMs. - AUTO_ONLY: Nat + IPs are allocated by Google Cloud Platform; customers can't + specify any Nat IPs. When choosing AUTO_ONLY, then nat_ip + should be empty. Check the NatIpAllocateOption enum for the + list of possible values. + + This field is a member of `oneof`_ ``_nat_ip_allocate_option``. + nat_ips (Sequence[str]): + A list of URLs of the IP resources used for + this Nat service. These IP addresses must be + valid static external IP addresses assigned to + the project. 
+ rules (Sequence[google.cloud.compute_v1.types.RouterNatRule]): + A list of rules associated with this NAT. + source_subnetwork_ip_ranges_to_nat (str): + Specify the Nat option, which can take one of the following + values: - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP + ranges in every Subnetwork are allowed to Nat. - + ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP + ranges in every Subnetwork are allowed to Nat. - + LIST_OF_SUBNETWORKS: A list of Subnetworks are allowed to + Nat (specified in the field subnetwork below) The default is + SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if + this field contains ALL_SUBNETWORKS_ALL_IP_RANGES or + ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not + be any other Router.Nat section in any Router for this + network in this region. Check the + SourceSubnetworkIpRangesToNat enum for the list of possible + values. + + This field is a member of `oneof`_ ``_source_subnetwork_ip_ranges_to_nat``. + subnetworks (Sequence[google.cloud.compute_v1.types.RouterNatSubnetworkToNat]): + A list of Subnetwork resources whose traffic should be + translated by NAT Gateway. It is used only when + LIST_OF_SUBNETWORKS is selected for the + SubnetworkIpRangeToNatOption above. + tcp_established_idle_timeout_sec (int): + Timeout (in seconds) for TCP established + connections. Defaults to 1200s if not set. + + This field is a member of `oneof`_ ``_tcp_established_idle_timeout_sec``. + tcp_time_wait_timeout_sec (int): + Timeout (in seconds) for TCP connections that are in + TIME_WAIT state. Defaults to 120s if not set. + + This field is a member of `oneof`_ ``_tcp_time_wait_timeout_sec``. + tcp_transitory_idle_timeout_sec (int): + Timeout (in seconds) for TCP transitory + connections. Defaults to 30s if not set. + + This field is a member of `oneof`_ ``_tcp_transitory_idle_timeout_sec``. + udp_idle_timeout_sec (int): + Timeout (in seconds) for UDP connections. + Defaults to 30s if not set. 
+ + This field is a member of `oneof`_ ``_udp_idle_timeout_sec``. + """ + class NatIpAllocateOption(proto.Enum): + r"""Specify the NatIpAllocateOption, which can take one of the following + values: - MANUAL_ONLY: Uses only Nat IP addresses provided by + customers. When there are not enough specified Nat IPs, the Nat + service fails for new VMs. - AUTO_ONLY: Nat IPs are allocated by + Google Cloud Platform; customers can't specify any Nat IPs. When + choosing AUTO_ONLY, then nat_ip should be empty. + """ + UNDEFINED_NAT_IP_ALLOCATE_OPTION = 0 + AUTO_ONLY = 182333500 + MANUAL_ONLY = 261251205 + + class SourceSubnetworkIpRangesToNat(proto.Enum): + r"""Specify the Nat option, which can take one of the following values: + - ALL_SUBNETWORKS_ALL_IP_RANGES: All of the IP ranges in every + Subnetwork are allowed to Nat. - + ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES: All of the primary IP ranges + in every Subnetwork are allowed to Nat. - LIST_OF_SUBNETWORKS: A + list of Subnetworks are allowed to Nat (specified in the field + subnetwork below) The default is + SUBNETWORK_IP_RANGE_TO_NAT_OPTION_UNSPECIFIED. Note that if this + field contains ALL_SUBNETWORKS_ALL_IP_RANGES or + ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES, then there should not be any + other Router.Nat section in any Router for this network in this + region. 
+ """ + UNDEFINED_SOURCE_SUBNETWORK_IP_RANGES_TO_NAT = 0 + ALL_SUBNETWORKS_ALL_IP_RANGES = 179964376 + ALL_SUBNETWORKS_ALL_PRIMARY_IP_RANGES = 185573819 + LIST_OF_SUBNETWORKS = 517542270 + + drain_nat_ips = proto.RepeatedField( + proto.STRING, + number=504078535, + ) + enable_endpoint_independent_mapping = proto.Field( + proto.BOOL, + number=259441819, + optional=True, + ) + icmp_idle_timeout_sec = proto.Field( + proto.INT32, + number=3647562, + optional=True, + ) + log_config = proto.Field( + proto.MESSAGE, + number=351299741, + optional=True, + message='RouterNatLogConfig', + ) + min_ports_per_vm = proto.Field( + proto.INT32, + number=186193587, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + nat_ip_allocate_option = proto.Field( + proto.STRING, + number=429726845, + optional=True, + ) + nat_ips = proto.RepeatedField( + proto.STRING, + number=117635086, + ) + rules = proto.RepeatedField( + proto.MESSAGE, + number=108873975, + message='RouterNatRule', + ) + source_subnetwork_ip_ranges_to_nat = proto.Field( + proto.STRING, + number=252213211, + optional=True, + ) + subnetworks = proto.RepeatedField( + proto.MESSAGE, + number=415853125, + message='RouterNatSubnetworkToNat', + ) + tcp_established_idle_timeout_sec = proto.Field( + proto.INT32, + number=223098349, + optional=True, + ) + tcp_time_wait_timeout_sec = proto.Field( + proto.INT32, + number=513596925, + optional=True, + ) + tcp_transitory_idle_timeout_sec = proto.Field( + proto.INT32, + number=205028774, + optional=True, + ) + udp_idle_timeout_sec = proto.Field( + proto.INT32, + number=64919878, + optional=True, + ) + + +class RouterNatLogConfig(proto.Message): + r"""Configuration of logging on a NAT. + + Attributes: + enable (bool): + Indicates whether or not to export logs. This + is false by default. + + This field is a member of `oneof`_ ``_enable``. + filter (str): + Specify the desired filtering of logs on this NAT. 
If + unspecified, logs are exported for all connections handled + by this NAT. This option can take one of the following + values: - ERRORS_ONLY: Export logs only for connection + failures. - TRANSLATIONS_ONLY: Export logs only for + successful connections. - ALL: Export logs for all + connections, successful and unsuccessful. Check the Filter + enum for the list of possible values. + + This field is a member of `oneof`_ ``_filter``. + """ + class Filter(proto.Enum): + r"""Specify the desired filtering of logs on this NAT. If unspecified, + logs are exported for all connections handled by this NAT. This + option can take one of the following values: - ERRORS_ONLY: Export + logs only for connection failures. - TRANSLATIONS_ONLY: Export logs + only for successful connections. - ALL: Export logs for all + connections, successful and unsuccessful. + """ + UNDEFINED_FILTER = 0 + ALL = 64897 + ERRORS_ONLY = 307484672 + TRANSLATIONS_ONLY = 357212649 + + enable = proto.Field( + proto.BOOL, + number=311764355, + optional=True, + ) + filter = proto.Field( + proto.STRING, + number=336120696, + optional=True, + ) + + +class RouterNatRule(proto.Message): + r""" + + Attributes: + action (google.cloud.compute_v1.types.RouterNatRuleAction): + The action to be enforced for traffic that + matches this rule. + + This field is a member of `oneof`_ ``_action``. + description (str): + An optional description of this rule. + + This field is a member of `oneof`_ ``_description``. + match (str): + CEL expression that specifies the match condition that + egress traffic from a VM is evaluated against. If it + evaluates to true, the corresponding ``action`` is enforced. 
+ The following examples are valid match expressions for + public NAT: "inIpRange(destination.ip, '1.1.0.0/16') \|\| + inIpRange(destination.ip, '2.2.0.0/16')" "destination.ip == + '1.1.0.1' \|\| destination.ip == '8.8.8.8'" The following + example is a valid match expression for private NAT: + "nexthop.hub == '/projects/my-project/global/hub/hub-1'". + + This field is a member of `oneof`_ ``_match``. + rule_number (int): + An integer uniquely identifying a rule in the + list. The rule number must be a positive value + between 0 and 65000, and must be unique among + rules within a NAT. + + This field is a member of `oneof`_ ``_rule_number``. + """ + + action = proto.Field( + proto.MESSAGE, + number=187661878, + optional=True, + message='RouterNatRuleAction', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + match = proto.Field( + proto.STRING, + number=103668165, + optional=True, + ) + rule_number = proto.Field( + proto.UINT32, + number=535211500, + optional=True, + ) + + +class RouterNatRuleAction(proto.Message): + r""" + + Attributes: + source_nat_active_ips (Sequence[str]): + A list of URLs of the IP resources used for + this NAT rule. These IP addresses must be valid + static external IP addresses assigned to the + project. This field is used for public NAT. + source_nat_drain_ips (Sequence[str]): + A list of URLs of the IP resources to be + drained. These IPs must be valid static external + IPs that have been assigned to the NAT. These + IPs should be used for updating/patching a NAT + rule only. This field is used for public NAT. + """ + + source_nat_active_ips = proto.RepeatedField( + proto.STRING, + number=210378229, + ) + source_nat_drain_ips = proto.RepeatedField( + proto.STRING, + number=340812451, + ) + + +class RouterNatSubnetworkToNat(proto.Message): + r"""Defines the IP ranges that want to use NAT for a subnetwork. + + Attributes: + name (str): + URL for the subnetwork resource that will use + NAT. 
+ + This field is a member of `oneof`_ ``_name``. + secondary_ip_range_names (Sequence[str]): + A list of the secondary ranges of the Subnetwork that are + allowed to use NAT. This can be populated only if + "LIST_OF_SECONDARY_IP_RANGES" is one of the values in + source_ip_ranges_to_nat. + source_ip_ranges_to_nat (Sequence[str]): + Specify the options for NAT ranges in the Subnetwork. All + options of a single value are valid except + NAT_IP_RANGE_OPTION_UNSPECIFIED. The only valid option with + multiple values is: ["PRIMARY_IP_RANGE", + "LIST_OF_SECONDARY_IP_RANGES"] Default: [ALL_IP_RANGES] + Check the SourceIpRangesToNat enum for the list of possible + values. + """ + class SourceIpRangesToNat(proto.Enum): + r"""""" + UNDEFINED_SOURCE_IP_RANGES_TO_NAT = 0 + ALL_IP_RANGES = 35608496 + LIST_OF_SECONDARY_IP_RANGES = 192289308 + PRIMARY_IP_RANGE = 297109954 + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + secondary_ip_range_names = proto.RepeatedField( + proto.STRING, + number=264315097, + ) + source_ip_ranges_to_nat = proto.RepeatedField( + proto.STRING, + number=388310386, + ) + + +class RouterStatus(proto.Message): + r""" + + Attributes: + best_routes (Sequence[google.cloud.compute_v1.types.Route]): + Best routes for this router's network. + best_routes_for_router (Sequence[google.cloud.compute_v1.types.Route]): + Best routes learned by this router. + bgp_peer_status (Sequence[google.cloud.compute_v1.types.RouterStatusBgpPeerStatus]): + + nat_status (Sequence[google.cloud.compute_v1.types.RouterStatusNatStatus]): + + network (str): + URI of the network to which this router + belongs. + + This field is a member of `oneof`_ ``_network``. 
+ """ + + best_routes = proto.RepeatedField( + proto.MESSAGE, + number=395826693, + message='Route', + ) + best_routes_for_router = proto.RepeatedField( + proto.MESSAGE, + number=119389689, + message='Route', + ) + bgp_peer_status = proto.RepeatedField( + proto.MESSAGE, + number=218459131, + message='RouterStatusBgpPeerStatus', + ) + nat_status = proto.RepeatedField( + proto.MESSAGE, + number=63098064, + message='RouterStatusNatStatus', + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + + +class RouterStatusBgpPeerStatus(proto.Message): + r""" + + Attributes: + advertised_routes (Sequence[google.cloud.compute_v1.types.Route]): + Routes that were advertised to the remote BGP + peer + ip_address (str): + IP address of the local BGP interface. + + This field is a member of `oneof`_ ``_ip_address``. + linked_vpn_tunnel (str): + URL of the VPN tunnel that this BGP peer + controls. + + This field is a member of `oneof`_ ``_linked_vpn_tunnel``. + name (str): + Name of this BGP peer. Unique within the + Routers resource. + + This field is a member of `oneof`_ ``_name``. + num_learned_routes (int): + Number of routes learned from the remote BGP + Peer. + + This field is a member of `oneof`_ ``_num_learned_routes``. + peer_ip_address (str): + IP address of the remote BGP interface. + + This field is a member of `oneof`_ ``_peer_ip_address``. + router_appliance_instance (str): + [Output only] URI of the VM instance that is used as + third-party router appliances such as Next Gen Firewalls, + Virtual Routers, or Router Appliances. The VM instance is + the peer side of the BGP session. + + This field is a member of `oneof`_ ``_router_appliance_instance``. + state (str): + BGP state as specified in RFC1771. + + This field is a member of `oneof`_ ``_state``. + status (str): + Status of the BGP peer: {UP, DOWN} + Check the Status enum for the list of possible + values. + + This field is a member of `oneof`_ ``_status``. 
+ uptime (str): + Time this session has been up. Format: 14 + years, 51 weeks, 6 days, 23 hours, 59 minutes, + 59 seconds + + This field is a member of `oneof`_ ``_uptime``. + uptime_seconds (str): + Time this session has been up, in seconds. + Format: 145 + + This field is a member of `oneof`_ ``_uptime_seconds``. + """ + class Status(proto.Enum): + r"""Status of the BGP peer: {UP, DOWN}""" + UNDEFINED_STATUS = 0 + DOWN = 2104482 + UNKNOWN = 433141802 + UP = 2715 + + advertised_routes = proto.RepeatedField( + proto.MESSAGE, + number=333393068, + message='Route', + ) + ip_address = proto.Field( + proto.STRING, + number=406272220, + optional=True, + ) + linked_vpn_tunnel = proto.Field( + proto.STRING, + number=352296953, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + num_learned_routes = proto.Field( + proto.UINT32, + number=135457535, + optional=True, + ) + peer_ip_address = proto.Field( + proto.STRING, + number=207735769, + optional=True, + ) + router_appliance_instance = proto.Field( + proto.STRING, + number=468312989, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + uptime = proto.Field( + proto.STRING, + number=235379688, + optional=True, + ) + uptime_seconds = proto.Field( + proto.STRING, + number=104736040, + optional=True, + ) + + +class RouterStatusNatStatus(proto.Message): + r"""Status of a NAT contained in this router. + + Attributes: + auto_allocated_nat_ips (Sequence[str]): + A list of IPs auto-allocated for NAT. Example: ["1.1.1.1", + "129.2.16.89"] + drain_auto_allocated_nat_ips (Sequence[str]): + A list of IPs auto-allocated for NAT that are in drain mode. + Example: ["1.1.1.1", "179.12.26.133"]. + drain_user_allocated_nat_ips (Sequence[str]): + A list of IPs user-allocated for NAT that are in drain mode. + Example: ["1.1.1.1", "179.12.26.133"]. 
+ min_extra_nat_ips_needed (int): + The number of extra IPs to allocate. This will be greater + than 0 only if user-specified IPs are NOT enough to allow + all configured VMs to use NAT. This value is meaningful only + when auto-allocation of NAT IPs is *not* used. + + This field is a member of `oneof`_ ``_min_extra_nat_ips_needed``. + name (str): + Unique name of this NAT. + + This field is a member of `oneof`_ ``_name``. + num_vm_endpoints_with_nat_mappings (int): + Number of VM endpoints (i.e., Nics) that can + use NAT. + + This field is a member of `oneof`_ ``_num_vm_endpoints_with_nat_mappings``. + rule_status (Sequence[google.cloud.compute_v1.types.RouterStatusNatStatusNatRuleStatus]): + Status of rules in this NAT. + user_allocated_nat_ip_resources (Sequence[str]): + A list of fully qualified URLs of reserved IP + address resources. + user_allocated_nat_ips (Sequence[str]): + A list of IPs user-allocated for NAT. They + will be raw IP strings like "179.12.26.133". + """ + + auto_allocated_nat_ips = proto.RepeatedField( + proto.STRING, + number=510794246, + ) + drain_auto_allocated_nat_ips = proto.RepeatedField( + proto.STRING, + number=309184557, + ) + drain_user_allocated_nat_ips = proto.RepeatedField( + proto.STRING, + number=305268553, + ) + min_extra_nat_ips_needed = proto.Field( + proto.INT32, + number=365786338, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + num_vm_endpoints_with_nat_mappings = proto.Field( + proto.INT32, + number=512367468, + optional=True, + ) + rule_status = proto.RepeatedField( + proto.MESSAGE, + number=140223125, + message='RouterStatusNatStatusNatRuleStatus', + ) + user_allocated_nat_ip_resources = proto.RepeatedField( + proto.STRING, + number=212776151, + ) + user_allocated_nat_ips = proto.RepeatedField( + proto.STRING, + number=506878242, + ) + + +class RouterStatusNatStatusNatRuleStatus(proto.Message): + r"""Status of a NAT Rule contained in this NAT. 
+ + Attributes: + active_nat_ips (Sequence[str]): + A list of active IPs for NAT. Example: ["1.1.1.1", + "179.12.26.133"]. + drain_nat_ips (Sequence[str]): + A list of IPs for NAT that are in drain mode. Example: + ["1.1.1.1", "179.12.26.133"]. + min_extra_ips_needed (int): + The number of extra IPs to allocate. This + will be greater than 0 only if the existing IPs + in this NAT Rule are NOT enough to allow all + configured VMs to use NAT. + + This field is a member of `oneof`_ ``_min_extra_ips_needed``. + num_vm_endpoints_with_nat_mappings (int): + Number of VM endpoints (i.e., NICs) that have + NAT Mappings from this NAT Rule. + + This field is a member of `oneof`_ ``_num_vm_endpoints_with_nat_mappings``. + rule_number (int): + Rule number of the rule. + + This field is a member of `oneof`_ ``_rule_number``. + """ + + active_nat_ips = proto.RepeatedField( + proto.STRING, + number=208517077, + ) + drain_nat_ips = proto.RepeatedField( + proto.STRING, + number=504078535, + ) + min_extra_ips_needed = proto.Field( + proto.INT32, + number=353002756, + optional=True, + ) + num_vm_endpoints_with_nat_mappings = proto.Field( + proto.INT32, + number=512367468, + optional=True, + ) + rule_number = proto.Field( + proto.INT32, + number=535211500, + optional=True, + ) + + +class RouterStatusResponse(proto.Message): + r""" + + Attributes: + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + result (google.cloud.compute_v1.types.RouterStatus): + + This field is a member of `oneof`_ ``_result``. + """ + + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + result = proto.Field( + proto.MESSAGE, + number=139315229, + optional=True, + message='RouterStatus', + ) + + +class RoutersPreviewResponse(proto.Message): + r""" + + Attributes: + resource (google.cloud.compute_v1.types.Router): + Preview of given router. + + This field is a member of `oneof`_ ``_resource``. 
+ """ + + resource = proto.Field( + proto.MESSAGE, + number=195806222, + optional=True, + message='Router', + ) + + +class RoutersScopedList(proto.Message): + r""" + + Attributes: + routers (Sequence[google.cloud.compute_v1.types.Router]): + A list of routers contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of routers when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + routers = proto.RepeatedField( + proto.MESSAGE, + number=311906890, + message='Router', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class Rule(proto.Message): + r"""This is deprecated and has no effect. Do not use. + + Attributes: + action (str): + This is deprecated and has no effect. Do not + use. Check the Action enum for the list of + possible values. + + This field is a member of `oneof`_ ``_action``. + conditions (Sequence[google.cloud.compute_v1.types.Condition]): + This is deprecated and has no effect. Do not + use. + description (str): + This is deprecated and has no effect. Do not + use. + + This field is a member of `oneof`_ ``_description``. + ins (Sequence[str]): + This is deprecated and has no effect. Do not + use. + log_configs (Sequence[google.cloud.compute_v1.types.LogConfig]): + This is deprecated and has no effect. Do not + use. + not_ins (Sequence[str]): + This is deprecated and has no effect. Do not + use. + permissions (Sequence[str]): + This is deprecated and has no effect. Do not + use. + """ + class Action(proto.Enum): + r"""This is deprecated and has no effect. 
Do not use.""" + UNDEFINED_ACTION = 0 + ALLOW = 62368553 + ALLOW_WITH_LOG = 76034177 + DENY = 2094604 + DENY_WITH_LOG = 351433982 + LOG = 75556 + NO_ACTION = 260643444 + + action = proto.Field( + proto.STRING, + number=187661878, + optional=True, + ) + conditions = proto.RepeatedField( + proto.MESSAGE, + number=142882488, + message='Condition', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + ins = proto.RepeatedField( + proto.STRING, + number=104430, + ) + log_configs = proto.RepeatedField( + proto.MESSAGE, + number=152873846, + message='LogConfig', + ) + not_ins = proto.RepeatedField( + proto.STRING, + number=518443138, + ) + permissions = proto.RepeatedField( + proto.STRING, + number=59962500, + ) + + +class SSLHealthCheck(proto.Message): + r""" + + Attributes: + port (int): + The TCP port number for the health check + request. The default value is 443. Valid values + are 1 through 65535. + + This field is a member of `oneof`_ ``_port``. + port_name (str): + Port name as defined in InstanceGroup#NamedPort#name. If + both port and port_name are defined, port takes precedence. + + This field is a member of `oneof`_ ``_port_name``. + port_specification (str): + Specifies how port is selected for health checking, can be + one of following values: USE_FIXED_PORT: The port number in + port is used for health checking. USE_NAMED_PORT: The + portName is used for health checking. USE_SERVING_PORT: For + NetworkEndpointGroup, the port specified for each network + endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is + used for health checking. If not specified, SSL health check + follows behavior specified in port and portName fields. + Check the PortSpecification enum for the list of possible + values. + + This field is a member of `oneof`_ ``_port_specification``. 
+ proxy_header (str): + Specifies the type of proxy header to append before sending + data to the backend, either NONE or PROXY_V1. The default is + NONE. Check the ProxyHeader enum for the list of possible + values. + + This field is a member of `oneof`_ ``_proxy_header``. + request (str): + The application data to send once the SSL + connection has been established (default value + is empty). If both request and response are + empty, the connection establishment alone will + indicate health. The request data can only be + ASCII. + + This field is a member of `oneof`_ ``_request``. + response (str): + The bytes to match against the beginning of + the response data. If left empty (the default + value), any response will indicate health. The + response data can only be ASCII. + + This field is a member of `oneof`_ ``_response``. + """ + class PortSpecification(proto.Enum): + r"""Specifies how port is selected for health checking, can be one of + following values: USE_FIXED_PORT: The port number in port is used + for health checking. USE_NAMED_PORT: The portName is used for health + checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port + specified for each network endpoint is used for health checking. For + other backends, the port or named port specified in the Backend + Service is used for health checking. If not specified, SSL health + check follows behavior specified in port and portName fields. + """ + UNDEFINED_PORT_SPECIFICATION = 0 + USE_FIXED_PORT = 190235748 + USE_NAMED_PORT = 349300671 + USE_SERVING_PORT = 362637516 + + class ProxyHeader(proto.Enum): + r"""Specifies the type of proxy header to append before sending data to + the backend, either NONE or PROXY_V1. The default is NONE. 
+ """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + port_specification = proto.Field( + proto.STRING, + number=51590597, + optional=True, + ) + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + request = proto.Field( + proto.STRING, + number=21951119, + optional=True, + ) + response = proto.Field( + proto.STRING, + number=196547649, + optional=True, + ) + + +class ScalingScheduleStatus(proto.Message): + r""" + + Attributes: + last_start_time (str): + [Output Only] The last time the scaling schedule became + active. Note: this is a timestamp when a schedule actually + became active, not when it was planned to do so. The + timestamp is in RFC3339 text format. + + This field is a member of `oneof`_ ``_last_start_time``. + next_start_time (str): + [Output Only] The next time the scaling schedule is to + become active. Note: this is a timestamp when a schedule is + planned to run, but the actual time might be slightly + different. The timestamp is in RFC3339 text format. + + This field is a member of `oneof`_ ``_next_start_time``. + state (str): + [Output Only] The current state of a scaling schedule. Check + the State enum for the list of possible values. + + This field is a member of `oneof`_ ``_state``. + """ + class State(proto.Enum): + r"""[Output Only] The current state of a scaling schedule.""" + UNDEFINED_STATE = 0 + ACTIVE = 314733318 + DISABLED = 516696700 + OBSOLETE = 66532761 + READY = 77848963 + + last_start_time = proto.Field( + proto.STRING, + number=34545107, + optional=True, + ) + next_start_time = proto.Field( + proto.STRING, + number=97270102, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + + +class Scheduling(proto.Message): + r"""Sets the scheduling options for an Instance. 
NextID: 21 + + Attributes: + automatic_restart (bool): + Specifies whether the instance should be + automatically restarted if it is terminated by + Compute Engine (not terminated by a user). You + can only set the automatic restart option for + standard instances. Preemptible instances cannot + be automatically restarted. By default, this is + set to true so an instance is automatically + restarted if it is terminated by Compute Engine. + + This field is a member of `oneof`_ ``_automatic_restart``. + location_hint (str): + An opaque location hint used to place the + instance close to other resources. This field is + for use by internal tools that use the public + API. + + This field is a member of `oneof`_ ``_location_hint``. + min_node_cpus (int): + The minimum number of virtual CPUs this + instance will consume when running on a sole- + tenant node. + + This field is a member of `oneof`_ ``_min_node_cpus``. + node_affinities (Sequence[google.cloud.compute_v1.types.SchedulingNodeAffinity]): + A set of node affinity and anti-affinity + configurations. Refer to Configuring node + affinity for more information. Overrides + reservationAffinity. + on_host_maintenance (str): + Defines the maintenance behavior for this + instance. For standard instances, the default + behavior is MIGRATE. For preemptible instances, + the default and only possible behavior is + TERMINATE. For more information, see Setting + Instance Scheduling Options. Check the + OnHostMaintenance enum for the list of possible + values. + + This field is a member of `oneof`_ ``_on_host_maintenance``. + preemptible (bool): + Defines whether the instance is preemptible. This can only + be set during instance creation or while the instance is + stopped and therefore, in a ``TERMINATED`` state. See + Instance Life Cycle for more information on the possible + instance states. + + This field is a member of `oneof`_ ``_preemptible``. 
+ """ + class OnHostMaintenance(proto.Enum): + r"""Defines the maintenance behavior for this instance. For + standard instances, the default behavior is MIGRATE. For + preemptible instances, the default and only possible behavior is + TERMINATE. For more information, see Setting Instance Scheduling + Options. + """ + UNDEFINED_ON_HOST_MAINTENANCE = 0 + MIGRATE = 165699979 + TERMINATE = 527617601 + + automatic_restart = proto.Field( + proto.BOOL, + number=350821371, + optional=True, + ) + location_hint = proto.Field( + proto.STRING, + number=350519505, + optional=True, + ) + min_node_cpus = proto.Field( + proto.INT32, + number=317231675, + optional=True, + ) + node_affinities = proto.RepeatedField( + proto.MESSAGE, + number=461799971, + message='SchedulingNodeAffinity', + ) + on_host_maintenance = proto.Field( + proto.STRING, + number=64616796, + optional=True, + ) + preemptible = proto.Field( + proto.BOOL, + number=324203169, + optional=True, + ) + + +class SchedulingNodeAffinity(proto.Message): + r"""Node Affinity: the configuration of desired nodes onto which + this Instance could be scheduled. + + Attributes: + key (str): + Corresponds to the label key of Node + resource. + + This field is a member of `oneof`_ ``_key``. + operator (str): + Defines the operation of node selection. Valid operators are + IN for affinity and NOT_IN for anti-affinity. Check the + Operator enum for the list of possible values. + + This field is a member of `oneof`_ ``_operator``. + values (Sequence[str]): + Corresponds to the label values of Node + resource. + """ + class Operator(proto.Enum): + r"""Defines the operation of node selection. Valid operators are IN for + affinity and NOT_IN for anti-affinity. 
+ """ + UNDEFINED_OPERATOR = 0 + IN = 2341 + NOT_IN = 161144369 + OPERATOR_UNSPECIFIED = 128892924 + + key = proto.Field( + proto.STRING, + number=106079, + optional=True, + ) + operator = proto.Field( + proto.STRING, + number=36317348, + optional=True, + ) + values = proto.RepeatedField( + proto.STRING, + number=249928994, + ) + + +class ScratchDisks(proto.Message): + r""" + + Attributes: + disk_gb (int): + Size of the scratch disk, defined in GB. + + This field is a member of `oneof`_ ``_disk_gb``. + """ + + disk_gb = proto.Field( + proto.INT32, + number=60990141, + optional=True, + ) + + +class Screenshot(proto.Message): + r"""An instance's screenshot. + + Attributes: + contents (str): + [Output Only] The Base64-encoded screenshot data. + + This field is a member of `oneof`_ ``_contents``. + kind (str): + [Output Only] Type of the resource. Always + compute#screenshot for the screenshots. + + This field is a member of `oneof`_ ``_kind``. + """ + + contents = proto.Field( + proto.STRING, + number=506419994, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + + +class SecurityPoliciesListPreconfiguredExpressionSetsResponse(proto.Message): + r""" + + Attributes: + preconfigured_expression_sets (google.cloud.compute_v1.types.SecurityPoliciesWafConfig): + + This field is a member of `oneof`_ ``_preconfigured_expression_sets``. + """ + + preconfigured_expression_sets = proto.Field( + proto.MESSAGE, + number=536200826, + optional=True, + message='SecurityPoliciesWafConfig', + ) + + +class SecurityPoliciesWafConfig(proto.Message): + r""" + + Attributes: + waf_rules (google.cloud.compute_v1.types.PreconfiguredWafSet): + + This field is a member of `oneof`_ ``_waf_rules``. + """ + + waf_rules = proto.Field( + proto.MESSAGE, + number=74899924, + optional=True, + message='PreconfiguredWafSet', + ) + + +class SecurityPolicy(proto.Message): + r"""Represents a Google Cloud Armor security policy resource. 
+    Only external backend services that use load balancers can
+    reference a security policy. For more information, see Google
+    Cloud Armor security policy overview.
+
+    Attributes:
+        adaptive_protection_config (google.cloud.compute_v1.types.SecurityPolicyAdaptiveProtectionConfig):
+
+            This field is a member of `oneof`_ ``_adaptive_protection_config``.
+        advanced_options_config (google.cloud.compute_v1.types.SecurityPolicyAdvancedOptionsConfig):
+
+            This field is a member of `oneof`_ ``_advanced_options_config``.
+        creation_timestamp (str):
+            [Output Only] Creation timestamp in RFC3339 text format.
+
+            This field is a member of `oneof`_ ``_creation_timestamp``.
+        description (str):
+            An optional description of this resource.
+            Provide this property when you create the
+            resource.
+
+            This field is a member of `oneof`_ ``_description``.
+        fingerprint (str):
+            Specifies a fingerprint for this resource,
+            which is essentially a hash of the metadata's
+            contents and used for optimistic locking. The
+            fingerprint is initially generated by Compute
+            Engine and changes after every request to modify
+            or update metadata. You must always provide an
+            up-to-date fingerprint hash in order to update
+            or change metadata, otherwise the request will
+            fail with error 412 conditionNotMet. To see the
+            latest fingerprint, make get() request to the
+            security policy.
+
+            This field is a member of `oneof`_ ``_fingerprint``.
+        id (int):
+            [Output Only] The unique identifier for the resource. This
+            identifier is defined by the server.
+
+            This field is a member of `oneof`_ ``_id``.
+        kind (str):
+            [Output only] Type of the resource. Always
+            compute#securityPolicy for security policies
+
+            This field is a member of `oneof`_ ``_kind``.
+        name (str):
+            Name of the resource. Provided by the client when the
+            resource is created. The name must be 1-63 characters long,
+            and comply with RFC1035.
Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + rules (Sequence[google.cloud.compute_v1.types.SecurityPolicyRule]): + A list of rules that belong to this policy. There must + always be a default rule (rule with priority 2147483647 and + match "*"). If no rules are provided when creating a + security policy, a default rule with action "allow" will be + added. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + """ + + adaptive_protection_config = proto.Field( + proto.MESSAGE, + number=150240735, + optional=True, + message='SecurityPolicyAdaptiveProtectionConfig', + ) + advanced_options_config = proto.Field( + proto.MESSAGE, + number=449276352, + optional=True, + message='SecurityPolicyAdvancedOptionsConfig', + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + rules = proto.RepeatedField( + proto.MESSAGE, + number=108873975, + message='SecurityPolicyRule', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + + +class SecurityPolicyAdaptiveProtectionConfig(proto.Message): + r"""Configuration options for Cloud Armor Adaptive Protection + (CAAP). 
+ + Attributes: + layer7_ddos_defense_config (google.cloud.compute_v1.types.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig): + If set to true, enables Cloud Armor Machine + Learning. + + This field is a member of `oneof`_ ``_layer7_ddos_defense_config``. + """ + + layer7_ddos_defense_config = proto.Field( + proto.MESSAGE, + number=437316771, + optional=True, + message='SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig', + ) + + +class SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(proto.Message): + r"""Configuration options for L7 DDoS detection. + + Attributes: + enable (bool): + If set to true, enables CAAP for L7 DDoS + detection. + + This field is a member of `oneof`_ ``_enable``. + rule_visibility (str): + Rule visibility can be one of the following: + STANDARD - opaque rules. (default) PREMIUM - + transparent rules. Check the RuleVisibility enum + for the list of possible values. + + This field is a member of `oneof`_ ``_rule_visibility``. + """ + class RuleVisibility(proto.Enum): + r"""Rule visibility can be one of the following: STANDARD - + opaque rules. (default) PREMIUM - transparent rules. + """ + UNDEFINED_RULE_VISIBILITY = 0 + PREMIUM = 399530551 + STANDARD = 484642493 + + enable = proto.Field( + proto.BOOL, + number=311764355, + optional=True, + ) + rule_visibility = proto.Field( + proto.STRING, + number=453258293, + optional=True, + ) + + +class SecurityPolicyAdvancedOptionsConfig(proto.Message): + r""" + + Attributes: + json_parsing (str): + Check the JsonParsing enum for the list of + possible values. + + This field is a member of `oneof`_ ``_json_parsing``. + log_level (str): + Check the LogLevel enum for the list of + possible values. + + This field is a member of `oneof`_ ``_log_level``. 
+    """
+    class JsonParsing(proto.Enum):
+        r""""""
+        UNDEFINED_JSON_PARSING = 0
+        DISABLED = 516696700
+        STANDARD = 484642493
+
+    class LogLevel(proto.Enum):
+        r""""""
+        UNDEFINED_LOG_LEVEL = 0
+        NORMAL = 161067239
+        VERBOSE = 532219234
+
+    json_parsing = proto.Field(
+        proto.STRING,
+        number=282493529,
+        optional=True,
+    )
+    log_level = proto.Field(
+        proto.STRING,
+        number=140582601,
+        optional=True,
+    )
+
+
+class SecurityPolicyList(proto.Message):
+    r"""
+
+    Attributes:
+        id (str):
+            [Output Only] Unique identifier for the resource; defined by
+            the server.
+
+            This field is a member of `oneof`_ ``_id``.
+        items (Sequence[google.cloud.compute_v1.types.SecurityPolicy]):
+            A list of SecurityPolicy resources.
+        kind (str):
+            [Output Only] Type of resource. Always
+            compute#securityPolicyList for lists of securityPolicies
+
+            This field is a member of `oneof`_ ``_kind``.
+        next_page_token (str):
+            [Output Only] This token allows you to get the next page of
+            results for list requests. If the number of results is
+            larger than maxResults, use the nextPageToken as a value for
+            the query parameter pageToken in the next list request.
+            Subsequent list requests will have their own nextPageToken
+            to continue paging through the results.
+
+            This field is a member of `oneof`_ ``_next_page_token``.
+        warning (google.cloud.compute_v1.types.Warning):
+            [Output Only] Informational warning message.
+
+            This field is a member of `oneof`_ ``_warning``.
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='SecurityPolicy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SecurityPolicyReference(proto.Message): + r""" + + Attributes: + security_policy (str): + + This field is a member of `oneof`_ ``_security_policy``. + """ + + security_policy = proto.Field( + proto.STRING, + number=171082513, + optional=True, + ) + + +class SecurityPolicyRule(proto.Message): + r"""Represents a rule that describes one or more match conditions + along with the action to be taken when traffic matches this + condition (allow or deny). + + Attributes: + action (str): + The Action to perform when the client + connection triggers the rule. Can currently be + either "allow" or "deny()" where valid values + for status are 403, 404, and 502. + + This field is a member of `oneof`_ ``_action``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + kind (str): + [Output only] Type of the resource. Always + compute#securityPolicyRule for security policy rules + + This field is a member of `oneof`_ ``_kind``. + match (google.cloud.compute_v1.types.SecurityPolicyRuleMatcher): + A match condition that incoming traffic is + evaluated against. If it evaluates to true, the + corresponding 'action' is enforced. + + This field is a member of `oneof`_ ``_match``. + preview (bool): + If set to true, the specified action is not + enforced. + + This field is a member of `oneof`_ ``_preview``. 
+ priority (int): + An integer indicating the priority of a rule + in the list. The priority must be a positive + value between 0 and 2147483647. Rules are + evaluated from highest to lowest priority where + 0 is the highest priority and 2147483647 is the + lowest priority. + + This field is a member of `oneof`_ ``_priority``. + """ + + action = proto.Field( + proto.STRING, + number=187661878, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + match = proto.Field( + proto.MESSAGE, + number=103668165, + optional=True, + message='SecurityPolicyRuleMatcher', + ) + preview = proto.Field( + proto.BOOL, + number=218686408, + optional=True, + ) + priority = proto.Field( + proto.INT32, + number=445151652, + optional=True, + ) + + +class SecurityPolicyRuleMatcher(proto.Message): + r"""Represents a match condition that incoming traffic is + evaluated against. Exactly one field must be specified. + + Attributes: + config (google.cloud.compute_v1.types.SecurityPolicyRuleMatcherConfig): + The configuration options available when specifying + versioned_expr. This field must be specified if + versioned_expr is specified and cannot be specified if + versioned_expr is not specified. + + This field is a member of `oneof`_ ``_config``. + expr (google.cloud.compute_v1.types.Expr): + User defined CEVAL expression. A CEVAL expression is used to + specify match criteria such as origin.ip, source.region_code + and contents in the request header. + + This field is a member of `oneof`_ ``_expr``. + versioned_expr (str): + Preconfigured versioned expression. If this field is + specified, config must also be specified. Available + preconfigured expressions along with their requirements are: + SRC_IPS_V1 - must specify the corresponding src_ip_range + field in config. Check the VersionedExpr enum for the list + of possible values. 
+ + This field is a member of `oneof`_ ``_versioned_expr``. + """ + class VersionedExpr(proto.Enum): + r"""Preconfigured versioned expression. If this field is specified, + config must also be specified. Available preconfigured expressions + along with their requirements are: SRC_IPS_V1 - must specify the + corresponding src_ip_range field in config. + """ + UNDEFINED_VERSIONED_EXPR = 0 + SRC_IPS_V1 = 70925961 + + config = proto.Field( + proto.MESSAGE, + number=255820610, + optional=True, + message='SecurityPolicyRuleMatcherConfig', + ) + expr = proto.Field( + proto.MESSAGE, + number=3127797, + optional=True, + message='Expr', + ) + versioned_expr = proto.Field( + proto.STRING, + number=322286013, + optional=True, + ) + + +class SecurityPolicyRuleMatcherConfig(proto.Message): + r""" + + Attributes: + src_ip_ranges (Sequence[str]): + CIDR IP address range. Maximum number of src_ip_ranges + allowed is 10. + """ + + src_ip_ranges = proto.RepeatedField( + proto.STRING, + number=432128083, + ) + + +class SecuritySettings(proto.Message): + r"""The authentication and authorization settings for a + BackendService. + + Attributes: + client_tls_policy (str): + Optional. A URL referring to a + networksecurity.ClientTlsPolicy resource that describes how + clients should authenticate with this service's backends. + clientTlsPolicy only applies to a global BackendService with + the loadBalancingScheme set to INTERNAL_SELF_MANAGED. If + left blank, communications are not encrypted. Note: This + field currently has no impact. + + This field is a member of `oneof`_ ``_client_tls_policy``. + subject_alt_names (Sequence[str]): + Optional. A list of Subject Alternative Names (SANs) that + the client verifies during a mutual TLS handshake with an + server/endpoint for this BackendService. When the server + presents its X.509 certificate to the client, the client + inspects the certificate's subjectAltName field. 
If the + field contains one of the specified values, the + communication continues. Otherwise, it fails. This + additional check enables the client to verify that the + server is authorized to run the requested service. Note that + the contents of the server certificate's subjectAltName + field are configured by the Public Key Infrastructure which + provisions server identities. Only applies to a global + BackendService with loadBalancingScheme set to + INTERNAL_SELF_MANAGED. Only applies when BackendService has + an attached clientTlsPolicy with clientCertificate (mTLS + mode). Note: This field currently has no impact. + """ + + client_tls_policy = proto.Field( + proto.STRING, + number=462325226, + optional=True, + ) + subject_alt_names = proto.RepeatedField( + proto.STRING, + number=330029535, + ) + + +class SendDiagnosticInterruptInstanceRequest(proto.Message): + r"""A request message for Instances.SendDiagnosticInterrupt. See + the method description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + project (str): + Project ID for this request. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SendDiagnosticInterruptInstanceResponse(proto.Message): + r"""A response message for Instances.SendDiagnosticInterrupt. See + the method description for details. + + """ + + +class SerialPortOutput(proto.Message): + r"""An instance serial console output. + + Attributes: + contents (str): + [Output Only] The contents of the console output. + + This field is a member of `oneof`_ ``_contents``. + kind (str): + [Output Only] Type of the resource. Always + compute#serialPortOutput for serial port output. + + This field is a member of `oneof`_ ``_kind``. 
+ next_ (int): + [Output Only] The position of the next byte of content, + regardless of whether the content exists, following the + output returned in the ``contents`` property. Use this value + in the next request as the start parameter. + + This field is a member of `oneof`_ ``_next``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + start (int): + The starting byte position of the output that was returned. + This should match the start parameter sent with the request. + If the serial console output exceeds the size of the buffer + (1 MB), older output is overwritten by newer content. The + output start value will indicate the byte position of the + output that was returned, which might be different than the + ``start`` value that was specified in the request. + + This field is a member of `oneof`_ ``_start``. + """ + + contents = proto.Field( + proto.STRING, + number=506419994, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_ = proto.Field( + proto.INT64, + number=3377907, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + start = proto.Field( + proto.INT64, + number=109757538, + optional=True, + ) + + +class ServerBinding(proto.Message): + r""" + + Attributes: + type_ (str): + Check the Type enum for the list of possible + values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""""" + UNDEFINED_TYPE = 0 + RESTART_NODE_ON_ANY_SERVER = 502950985 + RESTART_NODE_ON_MINIMAL_SERVERS = 204166495 + SERVER_BINDING_TYPE_UNSPECIFIED = 180825512 + + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class ServiceAccount(proto.Message): + r"""A service account. + + Attributes: + email (str): + Email address of the service account. + + This field is a member of `oneof`_ ``_email``. 
+        scopes (Sequence[str]):
+            The list of scopes to be made available for
+            this service account.
+    """
+
+    email = proto.Field(
+        proto.STRING,
+        number=96619420,
+        optional=True,
+    )
+    scopes = proto.RepeatedField(
+        proto.STRING,
+        number=165973151,
+    )
+
+
+class ServiceAttachment(proto.Message):
+    r"""Represents a ServiceAttachment resource. A service attachment
+    represents a service that a producer has exposed. It
+    encapsulates the load balancer which fronts the service runs and
+    a list of NAT IP ranges that the producer uses to represent the
+    consumers connecting to the service. next tag = 20
+
+    Attributes:
+        connected_endpoints (Sequence[google.cloud.compute_v1.types.ServiceAttachmentConnectedEndpoint]):
+            [Output Only] An array of connections for all the consumers
+            connected to this service attachment.
+        connection_preference (str):
+            The connection preference of service attachment. The value
+            can be set to ACCEPT_AUTOMATIC. An ACCEPT_AUTOMATIC service
+            attachment is one that always accepts the connection from
+            consumer forwarding rules. Check the ConnectionPreference
+            enum for the list of possible values.
+
+            This field is a member of `oneof`_ ``_connection_preference``.
+        consumer_accept_lists (Sequence[google.cloud.compute_v1.types.ServiceAttachmentConsumerProjectLimit]):
+            Projects that are allowed to connect to this
+            service attachment.
+        consumer_reject_lists (Sequence[str]):
+            Projects that are not allowed to connect to
+            this service attachment. The project can be
+            specified using its id or number.
+        creation_timestamp (str):
+            [Output Only] Creation timestamp in RFC3339 text format.
+
+            This field is a member of `oneof`_ ``_creation_timestamp``.
+        description (str):
+            An optional description of this resource.
+            Provide this property when you create the
+            resource.
+
+            This field is a member of `oneof`_ ``_description``.
+ enable_proxy_protocol (bool): + If true, enable the proxy protocol which is + for supplying client TCP/IP address data in TCP + connections that traverse proxies on their way + to destination servers. + + This field is a member of `oneof`_ ``_enable_proxy_protocol``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a ServiceAttachment. An + up-to-date fingerprint must be provided in order + to patch/update the ServiceAttachment; + otherwise, the request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve the + ServiceAttachment. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] The unique identifier for the resource type. + The server generates this identifier. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#serviceAttachment for service attachments. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + nat_subnets (Sequence[str]): + An array of URLs where each entry is the URL + of a subnet provided by the service producer to + use for NAT in this service attachment. + producer_forwarding_rule (str): + The URL of a forwarding rule with loadBalancingScheme + INTERNAL\* that is serving the endpoint identified by this + service attachment. 
+ + This field is a member of `oneof`_ ``_producer_forwarding_rule``. + psc_service_attachment_id (google.cloud.compute_v1.types.Uint128): + [Output Only] An 128-bit global unique ID of the PSC service + attachment. + + This field is a member of `oneof`_ ``_psc_service_attachment_id``. + region (str): + [Output Only] URL of the region where the service attachment + resides. This field applies only to the region resource. You + must specify this field as part of the HTTP request URL. It + is not settable as a field in the request body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + target_service (str): + The URL of a service serving the endpoint + identified by this service attachment. + + This field is a member of `oneof`_ ``_target_service``. + """ + class ConnectionPreference(proto.Enum): + r"""The connection preference of service attachment. The value can be + set to ACCEPT_AUTOMATIC. An ACCEPT_AUTOMATIC service attachment is + one that always accepts the connection from consumer forwarding + rules. 
+ """ + UNDEFINED_CONNECTION_PREFERENCE = 0 + ACCEPT_AUTOMATIC = 75250580 + ACCEPT_MANUAL = 373061341 + CONNECTION_PREFERENCE_UNSPECIFIED = 34590772 + + connected_endpoints = proto.RepeatedField( + proto.MESSAGE, + number=72223688, + message='ServiceAttachmentConnectedEndpoint', + ) + connection_preference = proto.Field( + proto.STRING, + number=285818076, + optional=True, + ) + consumer_accept_lists = proto.RepeatedField( + proto.MESSAGE, + number=402725703, + message='ServiceAttachmentConsumerProjectLimit', + ) + consumer_reject_lists = proto.RepeatedField( + proto.STRING, + number=204033182, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + enable_proxy_protocol = proto.Field( + proto.BOOL, + number=363791237, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + nat_subnets = proto.RepeatedField( + proto.STRING, + number=374785944, + ) + producer_forwarding_rule = proto.Field( + proto.STRING, + number=247927889, + optional=True, + ) + psc_service_attachment_id = proto.Field( + proto.MESSAGE, + number=527695214, + optional=True, + message='Uint128', + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + target_service = proto.Field( + proto.STRING, + number=1293831, + optional=True, + ) + + +class ServiceAttachmentAggregatedList(proto.Message): + r"""Contains a list of ServiceAttachmentsScopedList. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. 
+ + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.ServiceAttachmentAggregatedList.ItemsEntry]): + A list of ServiceAttachmentsScopedList + resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='ServiceAttachmentsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ServiceAttachmentConnectedEndpoint(proto.Message): + r"""[Output Only] A connection connected to this service attachment. + + Attributes: + endpoint (str): + The url of a connected endpoint. 
+ + This field is a member of `oneof`_ ``_endpoint``. + psc_connection_id (int): + The PSC connection id of the connected + endpoint. + + This field is a member of `oneof`_ ``_psc_connection_id``. + status (str): + The status of a connected endpoint to this + service attachment. Check the Status enum for + the list of possible values. + + This field is a member of `oneof`_ ``_status``. + """ + class Status(proto.Enum): + r"""The status of a connected endpoint to this service + attachment. + """ + UNDEFINED_STATUS = 0 + ACCEPTED = 246714279 + CLOSED = 380163436 + PENDING = 35394935 + REJECTED = 174130302 + STATUS_UNSPECIFIED = 42133066 + + endpoint = proto.Field( + proto.STRING, + number=130489749, + optional=True, + ) + psc_connection_id = proto.Field( + proto.UINT64, + number=292082397, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class ServiceAttachmentConsumerProjectLimit(proto.Message): + r""" + + Attributes: + connection_limit (int): + The value of the limit to set. + + This field is a member of `oneof`_ ``_connection_limit``. + project_id_or_num (str): + The project id or number for the project to + set the limit for. + + This field is a member of `oneof`_ ``_project_id_or_num``. + """ + + connection_limit = proto.Field( + proto.UINT32, + number=131403546, + optional=True, + ) + project_id_or_num = proto.Field( + proto.STRING, + number=349783336, + optional=True, + ) + + +class ServiceAttachmentList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.ServiceAttachment]): + A list of ServiceAttachment resources. + kind (str): + [Output Only] Type of the resource. Always + compute#serviceAttachment for service attachments. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='ServiceAttachment', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ServiceAttachmentsScopedList(proto.Message): + r""" + + Attributes: + service_attachments (Sequence[google.cloud.compute_v1.types.ServiceAttachment]): + A list of ServiceAttachments contained in + this scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of service attachments when the list is empty. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + service_attachments = proto.RepeatedField( + proto.MESSAGE, + number=307136806, + message='ServiceAttachment', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SetBackendServiceTargetSslProxyRequest(proto.Message): + r"""A request message for TargetSslProxies.SetBackendService. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_ssl_proxies_set_backend_service_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetBackendServiceRequest): + The body resource for this request + target_ssl_proxy (str): + Name of the TargetSslProxy resource whose + BackendService resource is to be set. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_ssl_proxies_set_backend_service_request_resource = proto.Field( + proto.MESSAGE, + number=139080868, + message='TargetSslProxiesSetBackendServiceRequest', + ) + target_ssl_proxy = proto.Field( + proto.STRING, + number=338795853, + ) + + +class SetBackendServiceTargetTcpProxyRequest(proto.Message): + r"""A request message for TargetTcpProxies.SetBackendService. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_tcp_proxies_set_backend_service_request_resource (google.cloud.compute_v1.types.TargetTcpProxiesSetBackendServiceRequest): + The body resource for this request + target_tcp_proxy (str): + Name of the TargetTcpProxy resource whose + BackendService resource is to be set. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_tcp_proxies_set_backend_service_request_resource = proto.Field( + proto.MESSAGE, + number=273721583, + message='TargetTcpProxiesSetBackendServiceRequest', + ) + target_tcp_proxy = proto.Field( + proto.STRING, + number=503065442, + ) + + +class SetBackupTargetPoolRequest(proto.Message): + r"""A request message for TargetPools.SetBackup. See the method + description for details. + + Attributes: + failover_ratio (float): + New failoverRatio value for the target pool. + + This field is a member of `oneof`_ ``_failover_ratio``. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_pool (str): + Name of the TargetPool resource to set a + backup pool for. 
+ target_reference_resource (google.cloud.compute_v1.types.TargetReference): + The body resource for this request + """ + + failover_ratio = proto.Field( + proto.FLOAT, + number=212667006, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_pool = proto.Field( + proto.STRING, + number=62796298, + ) + target_reference_resource = proto.Field( + proto.MESSAGE, + number=523721712, + message='TargetReference', + ) + + +class SetCommonInstanceMetadataProjectRequest(proto.Message): + r"""A request message for Projects.SetCommonInstanceMetadata. See + the method description for details. + + Attributes: + metadata_resource (google.cloud.compute_v1.types.Metadata): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + metadata_resource = proto.Field( + proto.MESSAGE, + number=291086110, + message='Metadata', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class SetDefaultNetworkTierProjectRequest(proto.Message): + r"""A request message for Projects.SetDefaultNetworkTier. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + projects_set_default_network_tier_request_resource (google.cloud.compute_v1.types.ProjectsSetDefaultNetworkTierRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + projects_set_default_network_tier_request_resource = proto.Field( + proto.MESSAGE, + number=126410762, + message='ProjectsSetDefaultNetworkTierRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class SetDeletionProtectionInstanceRequest(proto.Message): + r"""A request message for Instances.SetDeletionProtection. See + the method description for details. 
+ + Attributes: + deletion_protection (bool): + Whether the resource should be protected + against deletion. + + This field is a member of `oneof`_ ``_deletion_protection``. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. + """ + + deletion_protection = proto.Field( + proto.BOOL, + number=458014698, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetDiskAutoDeleteInstanceRequest(proto.Message): + r"""A request message for Instances.SetDiskAutoDelete. See the + method description for details. + + Attributes: + auto_delete (bool): + Whether to auto-delete the disk when the + instance is deleted. + device_name (str): + The device name of the disk to modify. Make a + get() request on the instance to view currently + attached disks and device names. + instance (str): + The instance name for this request. 
+ project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + auto_delete = proto.Field( + proto.BOOL, + number=464761403, + ) + device_name = proto.Field( + proto.STRING, + number=67541716, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetIamPolicyDiskRequest(proto.Message): + r"""A request message for Disks.SetIamPolicy. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. 
+ zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + zone_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=382082107, + message='ZoneSetPolicyRequest', + ) + + +class SetIamPolicyFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.SetIamPolicy. See the + method description for details. + + Attributes: + global_organization_set_policy_request_resource (google.cloud.compute_v1.types.GlobalOrganizationSetPolicyRequest): + The body resource for this request + resource (str): + Name or id of the resource for this request. + """ + + global_organization_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=177408606, + message='GlobalOrganizationSetPolicyRequest', + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicyImageRequest(proto.Message): + r"""A request message for Images.SetIamPolicy. See the method + description for details. + + Attributes: + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + global_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=337048498, + message='GlobalSetPolicyRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicyInstanceRequest(proto.Message): + r"""A request message for Instances.SetIamPolicy. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. 
+ resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. + zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + zone_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=382082107, + message='ZoneSetPolicyRequest', + ) + + +class SetIamPolicyInstanceTemplateRequest(proto.Message): + r"""A request message for InstanceTemplates.SetIamPolicy. See the + method description for details. + + Attributes: + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + global_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=337048498, + message='GlobalSetPolicyRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicyLicenseRequest(proto.Message): + r"""A request message for Licenses.SetIamPolicy. See the method + description for details. + + Attributes: + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. 
+ """ + + global_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=337048498, + message='GlobalSetPolicyRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicyNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.SetIamPolicy. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. + zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + zone_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=382082107, + message='ZoneSetPolicyRequest', + ) + + +class SetIamPolicyNodeTemplateRequest(proto.Message): + r"""A request message for NodeTemplates.SetIamPolicy. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + resource (str): + Name or id of the resource for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=276489091, + message='RegionSetPolicyRequest', + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicyRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.SetIamPolicy. 
See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + resource (str): + Name or id of the resource for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=276489091, + message='RegionSetPolicyRequest', + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicyReservationRequest(proto.Message): + r"""A request message for Reservations.SetIamPolicy. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. + zone_set_policy_request_resource (google.cloud.compute_v1.types.ZoneSetPolicyRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + zone_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=382082107, + message='ZoneSetPolicyRequest', + ) + + +class SetIamPolicyResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.SetIamPolicy. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + resource (str): + Name or id of the resource for this request. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=276489091, + message='RegionSetPolicyRequest', + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicyServiceAttachmentRequest(proto.Message): + r"""A request message for ServiceAttachments.SetIamPolicy. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + resource (str): + Name or id of the resource for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=276489091, + message='RegionSetPolicyRequest', + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicySnapshotRequest(proto.Message): + r"""A request message for Snapshots.SetIamPolicy. See the method + description for details. + + Attributes: + global_set_policy_request_resource (google.cloud.compute_v1.types.GlobalSetPolicyRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + global_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=337048498, + message='GlobalSetPolicyRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetIamPolicySubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.SetIamPolicy. See the + method description for details. 
+ + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + region_set_policy_request_resource (google.cloud.compute_v1.types.RegionSetPolicyRequest): + The body resource for this request + resource (str): + Name or id of the resource for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_policy_request_resource = proto.Field( + proto.MESSAGE, + number=276489091, + message='RegionSetPolicyRequest', + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetInstanceTemplateInstanceGroupManagerRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.SetInstanceTemplate. See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. + instance_group_managers_set_instance_template_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersSetInstanceTemplateRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ zone (str): + The name of the zone where the managed + instance group is located. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_set_instance_template_request_resource = proto.Field( + proto.MESSAGE, + number=9809093, + message='InstanceGroupManagersSetInstanceTemplateRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetInstanceTemplateRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.SetInstanceTemplate. See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + region_instance_group_managers_set_template_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersSetTemplateRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_managers_set_template_request_resource = proto.Field( + proto.MESSAGE, + number=187310412, + message='RegionInstanceGroupManagersSetTemplateRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class SetLabelsDiskRequest(proto.Message): + r"""A request message for Disks.SetLabels. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource (str): + Name or id of the resource for this request. + zone (str): + The name of the zone for this request. 
+ zone_set_labels_request_resource (google.cloud.compute_v1.types.ZoneSetLabelsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + zone_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=364950798, + message='ZoneSetLabelsRequest', + ) + + +class SetLabelsExternalVpnGatewayRequest(proto.Message): + r"""A request message for ExternalVpnGateways.SetLabels. See the + method description for details. + + Attributes: + global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + global_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=319917189, + message='GlobalSetLabelsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetLabelsForwardingRuleRequest(proto.Message): + r"""A request message for ForwardingRules.SetLabels. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The region for this request. + region_set_labels_request_resource (google.cloud.compute_v1.types.RegionSetLabelsRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource (str): + Name or id of the resource for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=259357782, + message='RegionSetLabelsRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetLabelsGlobalForwardingRuleRequest(proto.Message): + r"""A request message for GlobalForwardingRules.SetLabels. See + the method description for details. + + Attributes: + global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + global_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=319917189, + message='GlobalSetLabelsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetLabelsImageRequest(proto.Message): + r"""A request message for Images.SetLabels. See the method + description for details. + + Attributes: + global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + project (str): + Project ID for this request. 
+ resource (str): + Name or id of the resource for this request. + """ + + global_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=319917189, + message='GlobalSetLabelsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetLabelsInstanceRequest(proto.Message): + r"""A request message for Instances.SetLabels. See the method + description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + instances_set_labels_request_resource (google.cloud.compute_v1.types.InstancesSetLabelsRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=207749344, + message='InstancesSetLabelsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetLabelsRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.SetLabels. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The region for this request. + region_set_labels_request_resource (google.cloud.compute_v1.types.RegionSetLabelsRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource (str): + Name or id of the resource for this request. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=259357782, + message='RegionSetLabelsRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetLabelsSnapshotRequest(proto.Message): + r"""A request message for Snapshots.SetLabels. See the method + description for details. + + Attributes: + global_set_labels_request_resource (google.cloud.compute_v1.types.GlobalSetLabelsRequest): + The body resource for this request + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + """ + + global_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=319917189, + message='GlobalSetLabelsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetLabelsVpnGatewayRequest(proto.Message): + r"""A request message for VpnGateways.SetLabels. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The region for this request. + region_set_labels_request_resource (google.cloud.compute_v1.types.RegionSetLabelsRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + resource (str): + Name or id of the resource for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_set_labels_request_resource = proto.Field( + proto.MESSAGE, + number=259357782, + message='RegionSetLabelsRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + + +class SetMachineResourcesInstanceRequest(proto.Message): + r"""A request message for Instances.SetMachineResources. See the + method description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + instances_set_machine_resources_request_resource (google.cloud.compute_v1.types.InstancesSetMachineResourcesRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_set_machine_resources_request_resource = proto.Field( + proto.MESSAGE, + number=196286318, + message='InstancesSetMachineResourcesRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetMachineTypeInstanceRequest(proto.Message): + r"""A request message for Instances.SetMachineType. See the + method description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + instances_set_machine_type_request_resource (google.cloud.compute_v1.types.InstancesSetMachineTypeRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_set_machine_type_request_resource = proto.Field( + proto.MESSAGE, + number=254157709, + message='InstancesSetMachineTypeRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetMetadataInstanceRequest(proto.Message): + r"""A request message for Instances.SetMetadata. See the method + description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + metadata_resource (google.cloud.compute_v1.types.Metadata): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + metadata_resource = proto.Field( + proto.MESSAGE, + number=291086110, + message='Metadata', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetMinCpuPlatformInstanceRequest(proto.Message): + r"""A request message for Instances.SetMinCpuPlatform. See the + method description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + instances_set_min_cpu_platform_request_resource (google.cloud.compute_v1.types.InstancesSetMinCpuPlatformRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_set_min_cpu_platform_request_resource = proto.Field( + proto.MESSAGE, + number=148459368, + message='InstancesSetMinCpuPlatformRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetNamedPortsInstanceGroupRequest(proto.Message): + r"""A request message for InstanceGroups.SetNamedPorts. See the + method description for details. + + Attributes: + instance_group (str): + The name of the instance group where the + named ports are updated. + instance_groups_set_named_ports_request_resource (google.cloud.compute_v1.types.InstanceGroupsSetNamedPortsRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the instance group + is located. 
+ """ + + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + instance_groups_set_named_ports_request_resource = proto.Field( + proto.MESSAGE, + number=385151535, + message='InstanceGroupsSetNamedPortsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetNamedPortsRegionInstanceGroupRequest(proto.Message): + r"""A request message for RegionInstanceGroups.SetNamedPorts. See + the method description for details. + + Attributes: + instance_group (str): + The name of the regional instance group where + the named ports are updated. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + region_instance_groups_set_named_ports_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupsSetNamedPortsRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_group = proto.Field( + proto.STRING, + number=81095253, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_groups_set_named_ports_request_resource = proto.Field( + proto.MESSAGE, + number=1574938, + message='RegionInstanceGroupsSetNamedPortsRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class SetNodeTemplateNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.SetNodeTemplate. See the + method description for details. + + Attributes: + node_group (str): + Name of the NodeGroup resource to update. + node_groups_set_node_template_request_resource (google.cloud.compute_v1.types.NodeGroupsSetNodeTemplateRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + node_group = proto.Field( + proto.STRING, + number=469958146, + ) + node_groups_set_node_template_request_resource = proto.Field( + proto.MESSAGE, + number=117382321, + message='NodeGroupsSetNodeTemplateRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetPrivateIpGoogleAccessSubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.SetPrivateIpGoogleAccess. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + subnetwork (str): + Name of the Subnetwork resource. 
+ subnetworks_set_private_ip_google_access_request_resource (google.cloud.compute_v1.types.SubnetworksSetPrivateIpGoogleAccessRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + ) + subnetworks_set_private_ip_google_access_request_resource = proto.Field( + proto.MESSAGE, + number=268920696, + message='SubnetworksSetPrivateIpGoogleAccessRequest', + ) + + +class SetProxyHeaderTargetSslProxyRequest(proto.Message): + r"""A request message for TargetSslProxies.SetProxyHeader. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_ssl_proxies_set_proxy_header_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetProxyHeaderRequest): + The body resource for this request + target_ssl_proxy (str): + Name of the TargetSslProxy resource whose + ProxyHeader is to be set. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_ssl_proxies_set_proxy_header_request_resource = proto.Field( + proto.MESSAGE, + number=205284526, + message='TargetSslProxiesSetProxyHeaderRequest', + ) + target_ssl_proxy = proto.Field( + proto.STRING, + number=338795853, + ) + + +class SetProxyHeaderTargetTcpProxyRequest(proto.Message): + r"""A request message for TargetTcpProxies.SetProxyHeader. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_tcp_proxies_set_proxy_header_request_resource (google.cloud.compute_v1.types.TargetTcpProxiesSetProxyHeaderRequest): + The body resource for this request + target_tcp_proxy (str): + Name of the TargetTcpProxy resource whose + ProxyHeader is to be set. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_tcp_proxies_set_proxy_header_request_resource = proto.Field( + proto.MESSAGE, + number=219958339, + message='TargetTcpProxiesSetProxyHeaderRequest', + ) + target_tcp_proxy = proto.Field( + proto.STRING, + number=503065442, + ) + + +class SetQuicOverrideTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.SetQuicOverride. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxies_set_quic_override_request_resource (google.cloud.compute_v1.types.TargetHttpsProxiesSetQuicOverrideRequest): + The body resource for this request + target_https_proxy (str): + Name of the TargetHttpsProxy resource to set + the QUIC override policy for. The name should + conform to RFC1035. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxies_set_quic_override_request_resource = proto.Field( + proto.MESSAGE, + number=72940258, + message='TargetHttpsProxiesSetQuicOverrideRequest', + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class SetSchedulingInstanceRequest(proto.Message): + r"""A request message for Instances.SetScheduling. See the method + description for details. + + Attributes: + instance (str): + Instance name for this request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + scheduling_resource (google.cloud.compute_v1.types.Scheduling): + The body resource for this request + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + scheduling_resource = proto.Field( + proto.MESSAGE, + number=463181401, + message='Scheduling', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetSecurityPolicyBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.SetSecurityPolicy. See + the method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to which + the security policy should be set. The name + should conform to RFC1035. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ security_policy_reference_resource (google.cloud.compute_v1.types.SecurityPolicyReference): + The body resource for this request + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + security_policy_reference_resource = proto.Field( + proto.MESSAGE, + number=204135024, + message='SecurityPolicyReference', + ) + + +class SetServiceAccountInstanceRequest(proto.Message): + r"""A request message for Instances.SetServiceAccount. See the + method description for details. + + Attributes: + instance (str): + Name of the instance resource to start. + instances_set_service_account_request_resource (google.cloud.compute_v1.types.InstancesSetServiceAccountRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_set_service_account_request_resource = proto.Field( + proto.MESSAGE, + number=275550008, + message='InstancesSetServiceAccountRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetShieldedInstanceIntegrityPolicyInstanceRequest(proto.Message): + r"""A request message for + Instances.SetShieldedInstanceIntegrityPolicy. See the method + description for details. + + Attributes: + instance (str): + Name or id of the instance scoping this + request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + shielded_instance_integrity_policy_resource (google.cloud.compute_v1.types.ShieldedInstanceIntegrityPolicy): + The body resource for this request + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + shielded_instance_integrity_policy_resource = proto.Field( + proto.MESSAGE, + number=409169462, + message='ShieldedInstanceIntegrityPolicy', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetSslCertificatesRegionTargetHttpsProxyRequest(proto.Message): + r"""A request message for + RegionTargetHttpsProxies.SetSslCertificates. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + region_target_https_proxies_set_ssl_certificates_request_resource (google.cloud.compute_v1.types.RegionTargetHttpsProxiesSetSslCertificatesRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxy (str): + Name of the TargetHttpsProxy resource to set + an SslCertificates resource for. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_target_https_proxies_set_ssl_certificates_request_resource = proto.Field( + proto.MESSAGE, + number=390693383, + message='RegionTargetHttpsProxiesSetSslCertificatesRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class SetSslCertificatesTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.SetSslCertificates. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxies_set_ssl_certificates_request_resource (google.cloud.compute_v1.types.TargetHttpsProxiesSetSslCertificatesRequest): + The body resource for this request + target_https_proxy (str): + Name of the TargetHttpsProxy resource to set + an SslCertificates resource for. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxies_set_ssl_certificates_request_resource = proto.Field( + proto.MESSAGE, + number=223122908, + message='TargetHttpsProxiesSetSslCertificatesRequest', + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class SetSslCertificatesTargetSslProxyRequest(proto.Message): + r"""A request message for TargetSslProxies.SetSslCertificates. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_ssl_proxies_set_ssl_certificates_request_resource (google.cloud.compute_v1.types.TargetSslProxiesSetSslCertificatesRequest): + The body resource for this request + target_ssl_proxy (str): + Name of the TargetSslProxy resource whose + SslCertificate resource is to be set. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_ssl_proxies_set_ssl_certificates_request_resource = proto.Field( + proto.MESSAGE, + number=147940797, + message='TargetSslProxiesSetSslCertificatesRequest', + ) + target_ssl_proxy = proto.Field( + proto.STRING, + number=338795853, + ) + + +class SetSslPolicyTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.SetSslPolicy. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + ssl_policy_reference_resource (google.cloud.compute_v1.types.SslPolicyReference): + The body resource for this request + target_https_proxy (str): + Name of the TargetHttpsProxy resource whose + SSL policy is to be set. The name must be 1-63 + characters long, and comply with RFC1035. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + ssl_policy_reference_resource = proto.Field( + proto.MESSAGE, + number=235403836, + message='SslPolicyReference', + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + + +class SetSslPolicyTargetSslProxyRequest(proto.Message): + r"""A request message for TargetSslProxies.SetSslPolicy. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + ssl_policy_reference_resource (google.cloud.compute_v1.types.SslPolicyReference): + The body resource for this request + target_ssl_proxy (str): + Name of the TargetSslProxy resource whose SSL + policy is to be set. The name must be 1-63 + characters long, and comply with RFC1035. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + ssl_policy_reference_resource = proto.Field( + proto.MESSAGE, + number=235403836, + message='SslPolicyReference', + ) + target_ssl_proxy = proto.Field( + proto.STRING, + number=338795853, + ) + + +class SetTagsInstanceRequest(proto.Message): + r"""A request message for Instances.SetTags. See the method + description for details. + + Attributes: + instance (str): + Name of the instance scoping this request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + tags_resource (google.cloud.compute_v1.types.Tags): + The body resource for this request + zone (str): + The name of the zone for this request. 
+ """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + tags_resource = proto.Field( + proto.MESSAGE, + number=331435380, + message='Tags', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetTargetForwardingRuleRequest(proto.Message): + r"""A request message for ForwardingRules.SetTarget. See the + method description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource in which + target is to be set. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ target_reference_resource (google.cloud.compute_v1.types.TargetReference): + The body resource for this request + """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_reference_resource = proto.Field( + proto.MESSAGE, + number=523721712, + message='TargetReference', + ) + + +class SetTargetGlobalForwardingRuleRequest(proto.Message): + r"""A request message for GlobalForwardingRules.SetTarget. See + the method description for details. + + Attributes: + forwarding_rule (str): + Name of the ForwardingRule resource in which + target is to be set. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ target_reference_resource (google.cloud.compute_v1.types.TargetReference): + The body resource for this request + """ + + forwarding_rule = proto.Field( + proto.STRING, + number=269964030, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_reference_resource = proto.Field( + proto.MESSAGE, + number=523721712, + message='TargetReference', + ) + + +class SetTargetPoolsInstanceGroupManagerRequest(proto.Message): + r"""A request message for InstanceGroupManagers.SetTargetPools. + See the method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. + instance_group_managers_set_target_pools_request_resource (google.cloud.compute_v1.types.InstanceGroupManagersSetTargetPoolsRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone where the managed + instance group is located. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_set_target_pools_request_resource = proto.Field( + proto.MESSAGE, + number=281150216, + message='InstanceGroupManagersSetTargetPoolsRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class SetTargetPoolsRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.SetTargetPools. See the method + description for details. + + Attributes: + instance_group_manager (str): + Name of the managed instance group. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + region_instance_group_managers_set_target_pools_request_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagersSetTargetPoolsRequest): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_managers_set_target_pools_request_resource = proto.Field( + proto.MESSAGE, + number=78734717, + message='RegionInstanceGroupManagersSetTargetPoolsRequest', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class SetUrlMapRegionTargetHttpProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpProxies.SetUrlMap. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_http_proxy (str): + Name of the TargetHttpProxy to set a URL map + for. 
+ url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_http_proxy = proto.Field( + proto.STRING, + number=206872421, + ) + url_map_reference_resource = proto.Field( + proto.MESSAGE, + number=398701333, + message='UrlMapReference', + ) + + +class SetUrlMapRegionTargetHttpsProxyRequest(proto.Message): + r"""A request message for RegionTargetHttpsProxies.SetUrlMap. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. Specify a + unique request ID so that if you must retry your request, + the server will know to ignore the request if it has already + been completed. For example, consider a situation where you + make an initial request and the request times out. If you + make the request again with the same request ID, the server + can check if original operation with the same request ID was + received, and if so, will ignore the second request. This + prevents clients from accidentally creating duplicate + commitments. The request ID must be a valid UUID with the + exception that zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). end_interface: + MixerMutationRequestBuilder + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxy (str): + Name of the TargetHttpsProxy to set a URL map + for. 
+ url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + url_map_reference_resource = proto.Field( + proto.MESSAGE, + number=398701333, + message='UrlMapReference', + ) + + +class SetUrlMapTargetHttpProxyRequest(proto.Message): + r"""A request message for TargetHttpProxies.SetUrlMap. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_http_proxy (str): + Name of the TargetHttpProxy to set a URL map + for. 
+ url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_http_proxy = proto.Field( + proto.STRING, + number=206872421, + ) + url_map_reference_resource = proto.Field( + proto.MESSAGE, + number=398701333, + message='UrlMapReference', + ) + + +class SetUrlMapTargetHttpsProxyRequest(proto.Message): + r"""A request message for TargetHttpsProxies.SetUrlMap. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + target_https_proxy (str): + Name of the TargetHttpsProxy resource whose + URL map is to be set. 
+ url_map_reference_resource (google.cloud.compute_v1.types.UrlMapReference): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + target_https_proxy = proto.Field( + proto.STRING, + number=52336748, + ) + url_map_reference_resource = proto.Field( + proto.MESSAGE, + number=398701333, + message='UrlMapReference', + ) + + +class SetUsageExportBucketProjectRequest(proto.Message): + r"""A request message for Projects.SetUsageExportBucket. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + usage_export_location_resource (google.cloud.compute_v1.types.UsageExportLocation): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + usage_export_location_resource = proto.Field( + proto.MESSAGE, + number=20260459, + message='UsageExportLocation', + ) + + +class ShieldedInstanceConfig(proto.Message): + r"""A set of Shielded Instance options. 
+ + Attributes: + enable_integrity_monitoring (bool): + Defines whether the instance has integrity + monitoring enabled. Enabled by default. + + This field is a member of `oneof`_ ``_enable_integrity_monitoring``. + enable_secure_boot (bool): + Defines whether the instance has Secure Boot + enabled. Disabled by default. + + This field is a member of `oneof`_ ``_enable_secure_boot``. + enable_vtpm (bool): + Defines whether the instance has the vTPM + enabled. Enabled by default. + + This field is a member of `oneof`_ ``_enable_vtpm``. + """ + + enable_integrity_monitoring = proto.Field( + proto.BOOL, + number=409071030, + optional=True, + ) + enable_secure_boot = proto.Field( + proto.BOOL, + number=123568638, + optional=True, + ) + enable_vtpm = proto.Field( + proto.BOOL, + number=181858935, + optional=True, + ) + + +class ShieldedInstanceIdentity(proto.Message): + r"""A Shielded Instance Identity. + + Attributes: + encryption_key (google.cloud.compute_v1.types.ShieldedInstanceIdentityEntry): + An Endorsement Key (EK) made by the RSA 2048 + algorithm issued to the Shielded Instance's + vTPM. + + This field is a member of `oneof`_ ``_encryption_key``. + kind (str): + [Output Only] Type of the resource. Always + compute#shieldedInstanceIdentity for shielded Instance + identity entry. + + This field is a member of `oneof`_ ``_kind``. + signing_key (google.cloud.compute_v1.types.ShieldedInstanceIdentityEntry): + An Attestation Key (AK) made by the RSA 2048 + algorithm issued to the Shielded Instance's + vTPM. + + This field is a member of `oneof`_ ``_signing_key``. 
+ """ + + encryption_key = proto.Field( + proto.MESSAGE, + number=488268707, + optional=True, + message='ShieldedInstanceIdentityEntry', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + signing_key = proto.Field( + proto.MESSAGE, + number=320948261, + optional=True, + message='ShieldedInstanceIdentityEntry', + ) + + +class ShieldedInstanceIdentityEntry(proto.Message): + r"""A Shielded Instance Identity Entry. + + Attributes: + ek_cert (str): + A PEM-encoded X.509 certificate. This field + can be empty. + + This field is a member of `oneof`_ ``_ek_cert``. + ek_pub (str): + A PEM-encoded public key. + + This field is a member of `oneof`_ ``_ek_pub``. + """ + + ek_cert = proto.Field( + proto.STRING, + number=450178589, + optional=True, + ) + ek_pub = proto.Field( + proto.STRING, + number=308947940, + optional=True, + ) + + +class ShieldedInstanceIntegrityPolicy(proto.Message): + r"""The policy describes the baseline against which Instance boot + integrity is measured. + + Attributes: + update_auto_learn_policy (bool): + Updates the integrity policy baseline using + the measurements from the VM instance's most + recent boot. + + This field is a member of `oneof`_ ``_update_auto_learn_policy``. + """ + + update_auto_learn_policy = proto.Field( + proto.BOOL, + number=245490215, + optional=True, + ) + + +class SignedUrlKey(proto.Message): + r"""Represents a customer-supplied Signing Key used by Cloud CDN + Signed URLs + + Attributes: + key_name (str): + Name of the key. The name must be 1-63 characters long, and + comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_key_name``. 
+ key_value (str):
+ 128-bit key value used for signing the URL.
+ The key value must be a valid RFC 4648 Section 5
+ base64url encoded string.
+
+ This field is a member of `oneof`_ ``_key_value``.
+ """
+
+ key_name = proto.Field(
+ proto.STRING,
+ number=500938859,
+ optional=True,
+ )
+ key_value = proto.Field(
+ proto.STRING,
+ number=504106897,
+ optional=True,
+ )
+
+
+class SimulateMaintenanceEventInstanceRequest(proto.Message):
+ r"""A request message for Instances.SimulateMaintenanceEvent. See
+ the method description for details.
+
+ Attributes:
+ instance (str):
+ Name of the instance scoping this request.
+ project (str):
+ Project ID for this request.
+ zone (str):
+ The name of the zone for this request.
+ """
+
+ instance = proto.Field(
+ proto.STRING,
+ number=18257045,
+ )
+ project = proto.Field(
+ proto.STRING,
+ number=227560217,
+ )
+ zone = proto.Field(
+ proto.STRING,
+ number=3744684,
+ )
+
+
+class Snapshot(proto.Message):
+ r"""Represents a Persistent Disk Snapshot resource. You can use
+ snapshots to back up data on a regular interval. For more
+ information, read Creating persistent disk snapshots.
+
+ Attributes:
+ auto_created (bool):
+ [Output Only] Set to true if snapshots are automatically
+ created by applying resource policy on the target disk.
+
+ This field is a member of `oneof`_ ``_auto_created``.
+ chain_name (str):
+ Creates the new snapshot in the snapshot
+ chain labeled with the specified name. The chain
+ name must be 1-63 characters long and comply
+ with RFC1035. This is an uncommon option only
+ for advanced service owners who need to create
+ separate snapshot chains, for example, for
+ chargeback tracking. When you describe your
+ snapshot resource, this field is visible only if
+ it has a non-empty value.
+
+ This field is a member of `oneof`_ ``_chain_name``.
+ creation_timestamp (str):
+ [Output Only] Creation timestamp in RFC3339 text format.
+
+ This field is a member of `oneof`_ ``_creation_timestamp``.
+ description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + disk_size_gb (int): + [Output Only] Size of the source disk, specified in GB. + + This field is a member of `oneof`_ ``_disk_size_gb``. + download_bytes (int): + [Output Only] Number of bytes downloaded to restore a + snapshot to a disk. + + This field is a member of `oneof`_ ``_download_bytes``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#snapshot + for Snapshot resources. + + This field is a member of `oneof`_ ``_kind``. + label_fingerprint (str): + A fingerprint for the labels being applied to + this snapshot, which is essentially a hash of + the labels set used for optimistic locking. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash in order to update or + change labels, otherwise the request will fail + with error 412 conditionNotMet. To see the + latest fingerprint, make a get() request to + retrieve a snapshot. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.Snapshot.LabelsEntry]): + Labels to apply to this snapshot. These can + be later modified by the setLabels method. Label + values may be empty. + license_codes (Sequence[int]): + [Output Only] Integer license codes indicating which + licenses are attached to this snapshot. + licenses (Sequence[str]): + [Output Only] A list of public visible licenses that apply + to this snapshot. This can be because the original image had + licenses attached (such as a Windows image). 
+        location_hint (str):
+            An opaque location hint used to place the
+            snapshot close to other resources. This field is
+            for use by internal tools that use the public
+            API.
+
+            This field is a member of `oneof`_ ``_location_hint``.
+        name (str):
+            Name of the resource; provided by the client when the
+            resource is created. The name must be 1-63 characters long,
+            and comply with RFC1035. Specifically, the name must be 1-63
+            characters long and match the regular expression
+            ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first
+            character must be a lowercase letter, and all following
+            characters must be a dash, lowercase letter, or digit,
+            except the last character, which cannot be a dash.
+
+            This field is a member of `oneof`_ ``_name``.
+        satisfies_pzs (bool):
+            [Output Only] Reserved for future use.
+
+            This field is a member of `oneof`_ ``_satisfies_pzs``.
+        self_link (str):
+            [Output Only] Server-defined URL for the resource.
+
+            This field is a member of `oneof`_ ``_self_link``.
+        snapshot_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey):
+            Encrypts the snapshot using a customer-
+            supplied encryption key. After you encrypt a
+            snapshot using a customer-supplied key, you must
+            provide the same key if you use the snapshot
+            later. For example, you must provide the
+            encryption key when you create a disk from the
+            encrypted snapshot in a future request.
+            Customer-supplied encryption keys do not protect
+            access to metadata of the snapshot. If you do
+            not provide an encryption key when creating the
+            snapshot, then the snapshot will be encrypted
+            using an automatically generated key and you do
+            not need to provide a key to use the snapshot
+            later.
+
+            This field is a member of `oneof`_ ``_snapshot_encryption_key``.
+        source_disk (str):
+            The source disk used to create this snapshot.
+
+            This field is a member of `oneof`_ ``_source_disk``.
+ source_disk_encryption_key (google.cloud.compute_v1.types.CustomerEncryptionKey): + The customer-supplied encryption key of the + source disk. Required if the source disk is + protected by a customer-supplied encryption key. + + This field is a member of `oneof`_ ``_source_disk_encryption_key``. + source_disk_id (str): + [Output Only] The ID value of the disk used to create this + snapshot. This value may be used to determine whether the + snapshot was taken from the current or a previous instance + of a given disk name. + + This field is a member of `oneof`_ ``_source_disk_id``. + status (str): + [Output Only] The status of the snapshot. This can be + CREATING, DELETING, FAILED, READY, or UPLOADING. Check the + Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + storage_bytes (int): + [Output Only] A size of the storage used by the snapshot. As + snapshots share storage, this number is expected to change + with snapshot creation/deletion. + + This field is a member of `oneof`_ ``_storage_bytes``. + storage_bytes_status (str): + [Output Only] An indicator whether storageBytes is in a + stable state or it is being adjusted as a result of shared + storage reallocation. This status can either be UPDATING, + meaning the size of the snapshot is being updated, or + UP_TO_DATE, meaning the size of the snapshot is up-to-date. + Check the StorageBytesStatus enum for the list of possible + values. + + This field is a member of `oneof`_ ``_storage_bytes_status``. + storage_locations (Sequence[str]): + Cloud Storage bucket storage location of the + snapshot (regional or multi-regional). + """ + class Status(proto.Enum): + r"""[Output Only] The status of the snapshot. This can be CREATING, + DELETING, FAILED, READY, or UPLOADING. 
+ """ + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + FAILED = 455706685 + READY = 77848963 + UPLOADING = 267603489 + + class StorageBytesStatus(proto.Enum): + r"""[Output Only] An indicator whether storageBytes is in a stable state + or it is being adjusted as a result of shared storage reallocation. + This status can either be UPDATING, meaning the size of the snapshot + is being updated, or UP_TO_DATE, meaning the size of the snapshot is + up-to-date. + """ + UNDEFINED_STORAGE_BYTES_STATUS = 0 + UPDATING = 494614342 + UP_TO_DATE = 101306702 + + auto_created = proto.Field( + proto.BOOL, + number=463922264, + optional=True, + ) + chain_name = proto.Field( + proto.STRING, + number=68644169, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + disk_size_gb = proto.Field( + proto.INT64, + number=316263735, + optional=True, + ) + download_bytes = proto.Field( + proto.INT64, + number=435054068, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + license_codes = proto.RepeatedField( + proto.INT64, + number=45482664, + ) + licenses = proto.RepeatedField( + proto.STRING, + number=337642578, + ) + location_hint = proto.Field( + proto.STRING, + number=350519505, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + satisfies_pzs = proto.Field( + proto.BOOL, + number=480964267, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + snapshot_encryption_key = proto.Field( + proto.MESSAGE, + number=43334526, + optional=True, 
+ message='CustomerEncryptionKey', + ) + source_disk = proto.Field( + proto.STRING, + number=451753793, + optional=True, + ) + source_disk_encryption_key = proto.Field( + proto.MESSAGE, + number=531501153, + optional=True, + message='CustomerEncryptionKey', + ) + source_disk_id = proto.Field( + proto.STRING, + number=454190809, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + storage_bytes = proto.Field( + proto.INT64, + number=424631719, + optional=True, + ) + storage_bytes_status = proto.Field( + proto.STRING, + number=490739082, + optional=True, + ) + storage_locations = proto.RepeatedField( + proto.STRING, + number=328005274, + ) + + +class SnapshotList(proto.Message): + r"""Contains a list of Snapshot resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Snapshot]): + A list of Snapshot resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Snapshot', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SourceInstanceParams(proto.Message): + r"""A specification of the parameters to use when creating the + instance template from a source instance. + + Attributes: + disk_configs (Sequence[google.cloud.compute_v1.types.DiskInstantiationConfig]): + Attached disks configuration. If not + provided, defaults are applied: For boot disk + and any other R/W disks, new custom images will + be created from each disk. For read-only disks, + they will be attached in read-only mode. Local + SSD disks will be created as blank volumes. + """ + + disk_configs = proto.RepeatedField( + proto.MESSAGE, + number=235580623, + message='DiskInstantiationConfig', + ) + + +class SslCertificate(proto.Message): + r"""Represents an SSL Certificate resource. Google Compute Engine has + two SSL Certificate resources: \* + `Global `__ \* + `Regional `__ + The sslCertificates are used by: - external HTTPS load balancers - + SSL proxy load balancers The regionSslCertificates are used by + internal HTTPS load balancers. Optionally, certificate file contents + that you upload can contain a set of up to five PEM-encoded + certificates. The API call creates an object (sslCertificate) that + holds this data. You can use SSL keys and certificates to secure + connections to a load balancer. For more information, read Creating + and using SSL certificates, SSL certificates quotas and limits, and + Troubleshooting SSL certificates. 
+ + Attributes: + certificate (str): + A value read into memory from a certificate + file. The certificate file must be in PEM + format. The certificate chain must be no greater + than 5 certs long. The chain must include at + least one intermediate cert. + + This field is a member of `oneof`_ ``_certificate``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + expire_time (str): + [Output Only] Expire time of the certificate. RFC3339 + + This field is a member of `oneof`_ ``_expire_time``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#sslCertificate for SSL certificates. + + This field is a member of `oneof`_ ``_kind``. + managed (google.cloud.compute_v1.types.SslCertificateManagedSslCertificate): + Configuration and status of a managed SSL + certificate. + + This field is a member of `oneof`_ ``_managed``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + private_key (str): + A value read into memory from a write-only + private key file. The private key file must be + in PEM format. For security, only insert + requests include this field. 
+ + This field is a member of `oneof`_ ``_private_key``. + region (str): + [Output Only] URL of the region where the regional SSL + Certificate resides. This field is not applicable to global + SSL Certificate. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + self_managed (google.cloud.compute_v1.types.SslCertificateSelfManagedSslCertificate): + Configuration and status of a self-managed + SSL certificate. + + This field is a member of `oneof`_ ``_self_managed``. + subject_alternative_names (Sequence[str]): + [Output Only] Domains associated with the certificate via + Subject Alternative Name. + type_ (str): + (Optional) Specifies the type of SSL certificate, either + "SELF_MANAGED" or "MANAGED". If not specified, the + certificate is self-managed and the fields certificate and + private_key are used. Check the Type enum for the list of + possible values. + + This field is a member of `oneof`_ ``_type``. + """ + class Type(proto.Enum): + r"""(Optional) Specifies the type of SSL certificate, either + "SELF_MANAGED" or "MANAGED". If not specified, the certificate is + self-managed and the fields certificate and private_key are used. 
+ """ + UNDEFINED_TYPE = 0 + MANAGED = 479501183 + SELF_MANAGED = 434437516 + TYPE_UNSPECIFIED = 437714322 + + certificate = proto.Field( + proto.STRING, + number=341787031, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + expire_time = proto.Field( + proto.STRING, + number=440691181, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + managed = proto.Field( + proto.MESSAGE, + number=298389407, + optional=True, + message='SslCertificateManagedSslCertificate', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + private_key = proto.Field( + proto.STRING, + number=361331107, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + self_managed = proto.Field( + proto.MESSAGE, + number=329284012, + optional=True, + message='SslCertificateSelfManagedSslCertificate', + ) + subject_alternative_names = proto.RepeatedField( + proto.STRING, + number=528807907, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class SslCertificateAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.SslCertificateAggregatedList.ItemsEntry]): + A list of SslCertificatesScopedList + resources. + kind (str): + [Output Only] Type of resource. Always + compute#sslCertificateAggregatedList for lists of SSL + Certificates. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='SslCertificatesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SslCertificateList(proto.Message): + r"""Contains a list of SslCertificate resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.SslCertificate]): + A list of SslCertificate resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='SslCertificate', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SslCertificateManagedSslCertificate(proto.Message): + r"""Configuration and status of a managed SSL certificate. + + Attributes: + domain_status (Sequence[google.cloud.compute_v1.types.SslCertificateManagedSslCertificate.DomainStatusEntry]): + [Output only] Detailed statuses of the domains specified for + managed certificate resource. + domains (Sequence[str]): + The domains for which a managed SSL certificate will be + generated. Each Google-managed SSL certificate supports up + to the `maximum number of domains per Google-managed SSL + certificate `__. + status (str): + [Output only] Status of the managed certificate resource. + Check the Status enum for the list of possible values. 
+ + This field is a member of `oneof`_ ``_status``. + """ + class Status(proto.Enum): + r"""[Output only] Status of the managed certificate resource.""" + UNDEFINED_STATUS = 0 + ACTIVE = 314733318 + MANAGED_CERTIFICATE_STATUS_UNSPECIFIED = 474800850 + PROVISIONING = 290896621 + PROVISIONING_FAILED = 76813775 + PROVISIONING_FAILED_PERMANENTLY = 275036203 + RENEWAL_FAILED = 434659076 + + domain_status = proto.MapField( + proto.STRING, + proto.STRING, + number=360305613, + ) + domains = proto.RepeatedField( + proto.STRING, + number=226935855, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + + +class SslCertificateSelfManagedSslCertificate(proto.Message): + r"""Configuration and status of a self-managed SSL certificate. + + Attributes: + certificate (str): + A local certificate file. The certificate + must be in PEM format. The certificate chain + must be no greater than 5 certs long. The chain + must include at least one intermediate cert. + + This field is a member of `oneof`_ ``_certificate``. + private_key (str): + A write-only private key in PEM format. Only + insert requests will include this field. + + This field is a member of `oneof`_ ``_private_key``. + """ + + certificate = proto.Field( + proto.STRING, + number=341787031, + optional=True, + ) + private_key = proto.Field( + proto.STRING, + number=361331107, + optional=True, + ) + + +class SslCertificatesScopedList(proto.Message): + r""" + + Attributes: + ssl_certificates (Sequence[google.cloud.compute_v1.types.SslCertificate]): + List of SslCertificates contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of backend services when the list is empty. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + ssl_certificates = proto.RepeatedField( + proto.MESSAGE, + number=366006543, + message='SslCertificate', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SslPoliciesList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.SslPolicy]): + A list of SslPolicy resources. + kind (str): + [Output Only] Type of the resource. Always + compute#sslPoliciesList for lists of sslPolicies. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='SslPolicy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SslPoliciesListAvailableFeaturesResponse(proto.Message): + r""" + + Attributes: + features (Sequence[str]): + + """ + + features = proto.RepeatedField( + proto.STRING, + number=246211645, + ) + + +class SslPolicy(proto.Message): + r"""Represents an SSL Policy resource. Use SSL policies to + control the SSL features, such as versions and cipher suites, + offered by an HTTPS or SSL Proxy load balancer. For more + information, read SSL Policy Concepts. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + custom_features (Sequence[str]): + A list of features enabled when the selected + profile is CUSTOM. The method returns the set of + features that can be specified in this list. + This field must be empty if the profile is not + CUSTOM. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + enabled_features (Sequence[str]): + [Output Only] The list of features enabled in the SSL + policy. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a SslPolicy. 
An up-to-
+            date fingerprint must be provided in order to
+            update the SslPolicy, otherwise the request will
+            fail with error 412 conditionNotMet. To see the
+            latest fingerprint, make a get() request to
+            retrieve an SslPolicy.
+
+            This field is a member of `oneof`_ ``_fingerprint``.
+        id (int):
+            [Output Only] The unique identifier for the resource. This
+            identifier is defined by the server.
+
+            This field is a member of `oneof`_ ``_id``.
+        kind (str):
+            [Output only] Type of the resource. Always
+            compute#sslPolicy for SSL policies.
+
+            This field is a member of `oneof`_ ``_kind``.
+        min_tls_version (str):
+            The minimum version of SSL protocol that can be used by the
+            clients to establish a connection with the load balancer.
+            This can be one of TLS_1_0, TLS_1_1, TLS_1_2. Check the
+            MinTlsVersion enum for the list of possible values.
+
+            This field is a member of `oneof`_ ``_min_tls_version``.
+        name (str):
+            Name of the resource. The name must be 1-63 characters long,
+            and comply with RFC1035. Specifically, the name must be 1-63
+            characters long and match the regular expression
+            ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first
+            character must be a lowercase letter, and all following
+            characters must be a dash, lowercase letter, or digit,
+            except the last character, which cannot be a dash.
+
+            This field is a member of `oneof`_ ``_name``.
+        profile (str):
+            Profile specifies the set of SSL features
+            that can be used by the load balancer when
+            negotiating SSL with clients. This can be one of
+            COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If
+            using CUSTOM, the set of SSL features to enable
+            must be specified in the customFeatures field.
+            Check the Profile enum for the list of possible
+            values.
+
+            This field is a member of `oneof`_ ``_profile``.
+        self_link (str):
+            [Output Only] Server-defined URL for the resource.
+
+            This field is a member of `oneof`_ ``_self_link``.
+ warnings (Sequence[google.cloud.compute_v1.types.Warnings]): + [Output Only] If potential misconfigurations are detected + for this SSL policy, this field will be populated with + warning messages. + """ + class MinTlsVersion(proto.Enum): + r"""The minimum version of SSL protocol that can be used by the clients + to establish a connection with the load balancer. This can be one of + TLS_1_0, TLS_1_1, TLS_1_2. + """ + UNDEFINED_MIN_TLS_VERSION = 0 + TLS_1_0 = 33116734 + TLS_1_1 = 33116735 + TLS_1_2 = 33116736 + + class Profile(proto.Enum): + r"""Profile specifies the set of SSL features that can be used by + the load balancer when negotiating SSL with clients. This can be + one of COMPATIBLE, MODERN, RESTRICTED, or CUSTOM. If using + CUSTOM, the set of SSL features to enable must be specified in + the customFeatures field. + """ + UNDEFINED_PROFILE = 0 + COMPATIBLE = 179357396 + CUSTOM = 388595569 + MODERN = 132013855 + RESTRICTED = 261551195 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + custom_features = proto.RepeatedField( + proto.STRING, + number=34789707, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + enabled_features = proto.RepeatedField( + proto.STRING, + number=469017467, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + min_tls_version = proto.Field( + proto.STRING, + number=8155943, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + profile = proto.Field( + proto.STRING, + number=227445161, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warnings = proto.RepeatedField( + proto.MESSAGE, + number=498091095, + message='Warnings', + ) + + +class 
SslPolicyReference(proto.Message): + r""" + + Attributes: + ssl_policy (str): + URL of the SSL policy resource. Set this to + empty string to clear any existing SSL policy + associated with the target proxy resource. + + This field is a member of `oneof`_ ``_ssl_policy``. + """ + + ssl_policy = proto.Field( + proto.STRING, + number=295190213, + optional=True, + ) + + +class StartInstanceRequest(proto.Message): + r"""A request message for Instances.Start. See the method + description for details. + + Attributes: + instance (str): + Name of the instance resource to start. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class StartWithEncryptionKeyInstanceRequest(proto.Message): + r"""A request message for Instances.StartWithEncryptionKey. See + the method description for details. + + Attributes: + instance (str): + Name of the instance resource to start. 
+ instances_start_with_encryption_key_request_resource (google.cloud.compute_v1.types.InstancesStartWithEncryptionKeyRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instances_start_with_encryption_key_request_resource = proto.Field( + proto.MESSAGE, + number=441712511, + message='InstancesStartWithEncryptionKeyRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class StatefulPolicy(proto.Message): + r""" + + Attributes: + preserved_state (google.cloud.compute_v1.types.StatefulPolicyPreservedState): + + This field is a member of `oneof`_ ``_preserved_state``. + """ + + preserved_state = proto.Field( + proto.MESSAGE, + number=2634026, + optional=True, + message='StatefulPolicyPreservedState', + ) + + +class StatefulPolicyPreservedState(proto.Message): + r"""Configuration of preserved resources. 
+ + Attributes: + disks (Sequence[google.cloud.compute_v1.types.StatefulPolicyPreservedState.DisksEntry]): + Disks created on the instances that will be + preserved on instance delete, update, etc. This + map is keyed with the device names of the disks. + """ + + disks = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=95594102, + message='StatefulPolicyPreservedStateDiskDevice', + ) + + +class StatefulPolicyPreservedStateDiskDevice(proto.Message): + r""" + + Attributes: + auto_delete (str): + These stateful disks will never be deleted during + autohealing, update or VM instance recreate operations. This + flag is used to configure if the disk should be deleted + after it is no longer used by the group, e.g. when the given + instance or the whole group is deleted. Note: disks attached + in READ_ONLY mode cannot be auto-deleted. Check the + AutoDelete enum for the list of possible values. + + This field is a member of `oneof`_ ``_auto_delete``. + """ + class AutoDelete(proto.Enum): + r"""These stateful disks will never be deleted during autohealing, + update or VM instance recreate operations. This flag is used to + configure if the disk should be deleted after it is no longer used + by the group, e.g. when the given instance or the whole group is + deleted. Note: disks attached in READ_ONLY mode cannot be + auto-deleted. + """ + UNDEFINED_AUTO_DELETE = 0 + NEVER = 74175084 + ON_PERMANENT_INSTANCE_DELETION = 95727719 + + auto_delete = proto.Field( + proto.STRING, + number=464761403, + optional=True, + ) + + +class StopInstanceRequest(proto.Message): + r"""A request message for Instances.Stop. See the method + description for details. + + Attributes: + instance (str): + Name of the instance resource to stop. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class Subnetwork(proto.Message): + r"""Represents a Subnetwork resource. A subnetwork (also known as + a subnet) is a logical partition of a Virtual Private Cloud + network with one primary IP range and zero or more secondary IP + ranges. For more information, read Virtual Private Cloud (VPC) + Network. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. This field can be set only at resource + creation time. + + This field is a member of `oneof`_ ``_description``. + enable_flow_logs (bool): + Whether to enable flow logging for this subnetwork. If this + field is not explicitly set, it will not appear in get + listings. If not set the default behavior is to disable flow + logging. 
This field isn't supported with the purpose field + set to INTERNAL_HTTPS_LOAD_BALANCER. + + This field is a member of `oneof`_ ``_enable_flow_logs``. + external_ipv6_prefix (str): + [Output Only] The range of external IPv6 addresses that are + owned by this subnetwork. + + This field is a member of `oneof`_ ``_external_ipv6_prefix``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a Subnetwork. An up-to- + date fingerprint must be provided in order to + update the Subnetwork, otherwise the request + will fail with error 412 conditionNotMet. To see + the latest fingerprint, make a get() request to + retrieve a Subnetwork. + + This field is a member of `oneof`_ ``_fingerprint``. + gateway_address (str): + [Output Only] The gateway address for default routes to + reach destination addresses outside this subnetwork. + + This field is a member of `oneof`_ ``_gateway_address``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + ip_cidr_range (str): + The range of internal addresses that are + owned by this subnetwork. Provide this property + when you create the subnetwork. For example, + 10.0.0.0/8 or 100.64.0.0/10. Ranges must be + unique and non-overlapping within a network. + Only IPv4 is supported. This field is set at + resource creation time. The range can be any + range listed in the Valid ranges list. The range + can be expanded after creation using + expandIpCidrRange. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + ipv6_access_type (str): + The access type of IPv6 address this subnet holds. It's + immutable and can only be specified during creation or the + first time the subnet is updated into IPV4_IPV6 dual stack. + If the ipv6_type is EXTERNAL then this subnet cannot enable + direct path. 
Check the Ipv6AccessType enum for the list of + possible values. + + This field is a member of `oneof`_ ``_ipv6_access_type``. + ipv6_cidr_range (str): + [Output Only] The range of internal IPv6 addresses that are + owned by this subnetwork. + + This field is a member of `oneof`_ ``_ipv6_cidr_range``. + kind (str): + [Output Only] Type of the resource. Always + compute#subnetwork for Subnetwork resources. + + This field is a member of `oneof`_ ``_kind``. + log_config (google.cloud.compute_v1.types.SubnetworkLogConfig): + This field denotes the VPC flow logging + options for this subnetwork. If logging is + enabled, logs are exported to Cloud Logging. + + This field is a member of `oneof`_ ``_log_config``. + name (str): + The name of the resource, provided by the client when + initially creating the resource. The name must be 1-63 + characters long, and comply with RFC1035. Specifically, the + name must be 1-63 characters long and match the regular + expression ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the + first character must be a lowercase letter, and all + following characters must be a dash, lowercase letter, or + digit, except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network (str): + The URL of the network to which this + subnetwork belongs, provided by the client when + initially creating the subnetwork. This field + can be set only at resource creation time. + + This field is a member of `oneof`_ ``_network``. + private_ip_google_access (bool): + Whether the VMs in this subnet can access + Google services without assigned external IP + addresses. This field can be both set at + resource creation time and updated using + setPrivateIpGoogleAccess. + + This field is a member of `oneof`_ ``_private_ip_google_access``. + private_ipv6_google_access (str): + The private IPv6 google access type for the + VMs in this subnet. This is an expanded field of + enablePrivateV6Access. 
If both fields are set, + privateIpv6GoogleAccess will take priority. This + field can be both set at resource creation time + and updated using patch. Check the + PrivateIpv6GoogleAccess enum for the list of + possible values. + + This field is a member of `oneof`_ ``_private_ipv6_google_access``. + purpose (str): + The purpose of the resource. This field can be either + PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A + subnetwork with purpose set to INTERNAL_HTTPS_LOAD_BALANCER + is a user-created subnetwork that is reserved for Internal + HTTP(S) Load Balancing. If unspecified, the purpose defaults + to PRIVATE_RFC_1918. The enableFlowLogs field isn't + supported with the purpose field set to + INTERNAL_HTTPS_LOAD_BALANCER. Check the Purpose enum for the + list of possible values. + + This field is a member of `oneof`_ ``_purpose``. + region (str): + URL of the region where the Subnetwork + resides. This field can be set only at resource + creation time. + + This field is a member of `oneof`_ ``_region``. + role (str): + The role of subnetwork. Currently, this field is only used + when purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can + be set to ACTIVE or BACKUP. An ACTIVE subnetwork is one that + is currently being used for Internal HTTP(S) Load Balancing. + A BACKUP subnetwork is one that is ready to be promoted to + ACTIVE or is currently draining. This field can be updated + with a patch request. Check the Role enum for the list of + possible values. + + This field is a member of `oneof`_ ``_role``. + secondary_ip_ranges (Sequence[google.cloud.compute_v1.types.SubnetworkSecondaryRange]): + An array of configurations for secondary IP + ranges for VM instances contained in this + subnetwork. The primary IP of such VM must + belong to the primary ipCidrRange of the + subnetwork. The alias IPs may belong to either + primary or secondary ranges. This field can be + updated with a patch request. 
+ self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + stack_type (str): + The stack type for this subnet to identify whether the IPv6 + feature is enabled or not. If not specified IPV4_ONLY will + be used. This field can be both set at resource creation + time and updated using patch. Check the StackType enum for + the list of possible values. + + This field is a member of `oneof`_ ``_stack_type``. + state (str): + [Output Only] The state of the subnetwork, which can be one + of the following values: READY: Subnetwork is created and + ready to use DRAINING: only applicable to subnetworks that + have the purpose set to INTERNAL_HTTPS_LOAD_BALANCER and + indicates that connections to the load balancer are being + drained. A subnetwork that is draining cannot be used or + modified until it reaches a status of READY Check the State + enum for the list of possible values. + + This field is a member of `oneof`_ ``_state``. + """ + class Ipv6AccessType(proto.Enum): + r"""The access type of IPv6 address this subnet holds. It's immutable + and can only be specified during creation or the first time the + subnet is updated into IPV4_IPV6 dual stack. If the ipv6_type is + EXTERNAL then this subnet cannot enable direct path. + """ + UNDEFINED_IPV6_ACCESS_TYPE = 0 + EXTERNAL = 35607499 + UNSPECIFIED_IPV6_ACCESS_TYPE = 313080613 + + class PrivateIpv6GoogleAccess(proto.Enum): + r"""The private IPv6 google access type for the VMs in this + subnet. This is an expanded field of enablePrivateV6Access. If + both fields are set, privateIpv6GoogleAccess will take priority. + This field can be both set at resource creation time and updated + using patch. + """ + UNDEFINED_PRIVATE_IPV6_GOOGLE_ACCESS = 0 + DISABLE_GOOGLE_ACCESS = 450958579 + ENABLE_BIDIRECTIONAL_ACCESS_TO_GOOGLE = 427975994 + ENABLE_OUTBOUND_VM_ACCESS_TO_GOOGLE = 288210263 + + class Purpose(proto.Enum): + r"""The purpose of the resource. 
This field can be either + PRIVATE_RFC_1918 or INTERNAL_HTTPS_LOAD_BALANCER. A subnetwork with + purpose set to INTERNAL_HTTPS_LOAD_BALANCER is a user-created + subnetwork that is reserved for Internal HTTP(S) Load Balancing. If + unspecified, the purpose defaults to PRIVATE_RFC_1918. The + enableFlowLogs field isn't supported with the purpose field set to + INTERNAL_HTTPS_LOAD_BALANCER. + """ + UNDEFINED_PURPOSE = 0 + INTERNAL_HTTPS_LOAD_BALANCER = 248748889 + PRIVATE = 403485027 + PRIVATE_RFC_1918 = 254902107 + PRIVATE_SERVICE_CONNECT = 48134724 + + class Role(proto.Enum): + r"""The role of subnetwork. Currently, this field is only used when + purpose = INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to + ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently + being used for Internal HTTP(S) Load Balancing. A BACKUP subnetwork + is one that is ready to be promoted to ACTIVE or is currently + draining. This field can be updated with a patch request. + """ + UNDEFINED_ROLE = 0 + ACTIVE = 314733318 + BACKUP = 341010882 + + class StackType(proto.Enum): + r"""The stack type for this subnet to identify whether the IPv6 feature + is enabled or not. If not specified IPV4_ONLY will be used. This + field can be both set at resource creation time and updated using + patch. + """ + UNDEFINED_STACK_TYPE = 0 + IPV4_IPV6 = 22197249 + IPV4_ONLY = 22373798 + UNSPECIFIED_STACK_TYPE = 298084569 + + class State(proto.Enum): + r"""[Output Only] The state of the subnetwork, which can be one of the + following values: READY: Subnetwork is created and ready to use + DRAINING: only applicable to subnetworks that have the purpose set + to INTERNAL_HTTPS_LOAD_BALANCER and indicates that connections to + the load balancer are being drained. 
A subnetwork that is draining + cannot be used or modified until it reaches a status of READY + """ + UNDEFINED_STATE = 0 + DRAINING = 480455402 + READY = 77848963 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + enable_flow_logs = proto.Field( + proto.BOOL, + number=151544420, + optional=True, + ) + external_ipv6_prefix = proto.Field( + proto.STRING, + number=139299190, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + gateway_address = proto.Field( + proto.STRING, + number=459867385, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + ipv6_access_type = proto.Field( + proto.STRING, + number=504658653, + optional=True, + ) + ipv6_cidr_range = proto.Field( + proto.STRING, + number=273141258, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + log_config = proto.Field( + proto.MESSAGE, + number=351299741, + optional=True, + message='SubnetworkLogConfig', + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + private_ip_google_access = proto.Field( + proto.BOOL, + number=421491790, + optional=True, + ) + private_ipv6_google_access = proto.Field( + proto.STRING, + number=48277006, + optional=True, + ) + purpose = proto.Field( + proto.STRING, + number=316407070, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + role = proto.Field( + proto.STRING, + number=3506294, + optional=True, + ) + secondary_ip_ranges = proto.RepeatedField( + proto.MESSAGE, + number=136658915, + message='SubnetworkSecondaryRange', + ) + self_link = proto.Field( + 
proto.STRING, + number=456214797, + optional=True, + ) + stack_type = proto.Field( + proto.STRING, + number=425908881, + optional=True, + ) + state = proto.Field( + proto.STRING, + number=109757585, + optional=True, + ) + + +class SubnetworkAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.SubnetworkAggregatedList.ItemsEntry]): + A list of SubnetworksScopedList resources. + kind (str): + [Output Only] Type of resource. Always + compute#subnetworkAggregatedList for aggregated lists of + subnetworks. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='SubnetworksScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SubnetworkList(proto.Message): + r"""Contains a list of Subnetwork resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Subnetwork]): + A list of Subnetwork resources. + kind (str): + [Output Only] Type of resource. Always + compute#subnetworkList for lists of subnetworks. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Subnetwork', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SubnetworkLogConfig(proto.Message): + r"""The available logging options for this subnetwork. + + Attributes: + aggregation_interval (str): + Can only be specified if VPC flow logging for + this subnetwork is enabled. Toggles the + aggregation interval for collecting flow logs. + Increasing the interval time will reduce the + amount of generated flow logs for long lasting + connections. Default is an interval of 5 seconds + per connection. Check the AggregationInterval + enum for the list of possible values. + + This field is a member of `oneof`_ ``_aggregation_interval``. + enable (bool): + Whether to enable flow logging for this + subnetwork. If this field is not explicitly set, + it will not appear in get listings. If not set + the default behavior is to disable flow logging. + + This field is a member of `oneof`_ ``_enable``. + filter_expr (str): + Can only be specified if VPC flow logs for + this subnetwork is enabled. Export filter used + to define which VPC flow logs should be logged. + + This field is a member of `oneof`_ ``_filter_expr``. + flow_sampling (float): + Can only be specified if VPC flow logging for this + subnetwork is enabled. The value of the field must be in [0, + 1]. Set the sampling rate of VPC flow logs within the + subnetwork where 1.0 means all collected logs are reported + and 0.0 means no logs are reported. 
Default is 0.5, which + means half of all collected logs are reported. + + This field is a member of `oneof`_ ``_flow_sampling``. + metadata (str): + Can only be specified if VPC flow logs for this subnetwork + is enabled. Configures whether all, none or a subset of + metadata fields should be added to the reported VPC flow + logs. Default is EXCLUDE_ALL_METADATA. Check the Metadata + enum for the list of possible values. + + This field is a member of `oneof`_ ``_metadata``. + metadata_fields (Sequence[str]): + Can only be specified if VPC flow logs for this subnetwork + is enabled and "metadata" was set to CUSTOM_METADATA. + """ + class AggregationInterval(proto.Enum): + r"""Can only be specified if VPC flow logging for this subnetwork + is enabled. Toggles the aggregation interval for collecting flow + logs. Increasing the interval time will reduce the amount of + generated flow logs for long lasting connections. Default is an + interval of 5 seconds per connection. + """ + UNDEFINED_AGGREGATION_INTERVAL = 0 + INTERVAL_10_MIN = 487155916 + INTERVAL_15_MIN = 491773521 + INTERVAL_1_MIN = 69052714 + INTERVAL_30_SEC = 7548937 + INTERVAL_5_MIN = 72746798 + INTERVAL_5_SEC = 72752429 + + class Metadata(proto.Enum): + r"""Can only be specified if VPC flow logs for this subnetwork is + enabled. Configures whether all, none or a subset of metadata fields + should be added to the reported VPC flow logs. Default is + EXCLUDE_ALL_METADATA. 
+ """ + UNDEFINED_METADATA = 0 + CUSTOM_METADATA = 62450749 + EXCLUDE_ALL_METADATA = 334519954 + INCLUDE_ALL_METADATA = 164619908 + + aggregation_interval = proto.Field( + proto.STRING, + number=174919042, + optional=True, + ) + enable = proto.Field( + proto.BOOL, + number=311764355, + optional=True, + ) + filter_expr = proto.Field( + proto.STRING, + number=183374428, + optional=True, + ) + flow_sampling = proto.Field( + proto.FLOAT, + number=530150360, + optional=True, + ) + metadata = proto.Field( + proto.STRING, + number=86866735, + optional=True, + ) + metadata_fields = proto.RepeatedField( + proto.STRING, + number=378461641, + ) + + +class SubnetworkSecondaryRange(proto.Message): + r"""Represents a secondary IP range of a subnetwork. + + Attributes: + ip_cidr_range (str): + The range of IP addresses belonging to this + subnetwork secondary range. Provide this + property when you create the subnetwork. Ranges + must be unique and non-overlapping with all + primary and secondary IP ranges within a + network. Only IPv4 is supported. The range can + be any range listed in the Valid ranges list. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + range_name (str): + The name associated with this subnetwork + secondary range, used when adding an alias IP + range to a VM instance. The name must be 1-63 + characters long, and comply with RFC1035. The + name must be unique within the subnetwork. + + This field is a member of `oneof`_ ``_range_name``. + """ + + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + range_name = proto.Field( + proto.STRING, + number=332216397, + optional=True, + ) + + +class SubnetworksExpandIpCidrRangeRequest(proto.Message): + r""" + + Attributes: + ip_cidr_range (str): + The IP (in CIDR format or netmask) of + internal addresses that are legal on this + Subnetwork. This range should be disjoint from + other subnetworks within this network. This + range can only be larger than (i.e. 
a superset + of) the range previously defined before the + update. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + """ + + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + + +class SubnetworksScopedList(proto.Message): + r""" + + Attributes: + subnetworks (Sequence[google.cloud.compute_v1.types.Subnetwork]): + A list of subnetworks contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + An informational warning that appears when + the list of addresses is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + subnetworks = proto.RepeatedField( + proto.MESSAGE, + number=415853125, + message='Subnetwork', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class SubnetworksSetPrivateIpGoogleAccessRequest(proto.Message): + r""" + + Attributes: + private_ip_google_access (bool): + + This field is a member of `oneof`_ ``_private_ip_google_access``. + """ + + private_ip_google_access = proto.Field( + proto.BOOL, + number=421491790, + optional=True, + ) + + +class Subsetting(proto.Message): + r"""Subsetting configuration for this BackendService. Currently + this is applicable only for Internal TCP/UDP load balancing, + Internal HTTP(S) load balancing and Traffic Director. + + Attributes: + policy (str): + Check the Policy enum for the list of + possible values. + + This field is a member of `oneof`_ ``_policy``. + """ + class Policy(proto.Enum): + r"""""" + UNDEFINED_POLICY = 0 + CONSISTENT_HASH_SUBSETTING = 108989492 + NONE = 2402104 + + policy = proto.Field( + proto.STRING, + number=91071794, + optional=True, + ) + + +class SwitchToCustomModeNetworkRequest(proto.Message): + r"""A request message for Networks.SwitchToCustomMode. See the + method description for details. + + Attributes: + network (str): + Name of the network to be updated. + project (str): + Project ID for this request. 
+ request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + network = proto.Field( + proto.STRING, + number=232872494, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class TCPHealthCheck(proto.Message): + r""" + + Attributes: + port (int): + The TCP port number for the health check + request. The default value is 80. Valid values + are 1 through 65535. + + This field is a member of `oneof`_ ``_port``. + port_name (str): + Port name as defined in InstanceGroup#NamedPort#name. If + both port and port_name are defined, port takes precedence. + + This field is a member of `oneof`_ ``_port_name``. + port_specification (str): + Specifies how port is selected for health checking, can be + one of following values: USE_FIXED_PORT: The port number in + port is used for health checking. USE_NAMED_PORT: The + portName is used for health checking. USE_SERVING_PORT: For + NetworkEndpointGroup, the port specified for each network + endpoint is used for health checking. For other backends, + the port or named port specified in the Backend Service is + used for health checking. 
If not specified, TCP health check + follows behavior specified in port and portName fields. + Check the PortSpecification enum for the list of possible + values. + + This field is a member of `oneof`_ ``_port_specification``. + proxy_header (str): + Specifies the type of proxy header to append before sending + data to the backend, either NONE or PROXY_V1. The default is + NONE. Check the ProxyHeader enum for the list of possible + values. + + This field is a member of `oneof`_ ``_proxy_header``. + request (str): + The application data to send once the TCP + connection has been established (default value + is empty). If both request and response are + empty, the connection establishment alone will + indicate health. The request data can only be + ASCII. + + This field is a member of `oneof`_ ``_request``. + response (str): + The bytes to match against the beginning of + the response data. If left empty (the default + value), any response will indicate health. The + response data can only be ASCII. + + This field is a member of `oneof`_ ``_response``. + """ + class PortSpecification(proto.Enum): + r"""Specifies how port is selected for health checking, can be one of + following values: USE_FIXED_PORT: The port number in port is used + for health checking. USE_NAMED_PORT: The portName is used for health + checking. USE_SERVING_PORT: For NetworkEndpointGroup, the port + specified for each network endpoint is used for health checking. For + other backends, the port or named port specified in the Backend + Service is used for health checking. If not specified, TCP health + check follows behavior specified in port and portName fields. + """ + UNDEFINED_PORT_SPECIFICATION = 0 + USE_FIXED_PORT = 190235748 + USE_NAMED_PORT = 349300671 + USE_SERVING_PORT = 362637516 + + class ProxyHeader(proto.Enum): + r"""Specifies the type of proxy header to append before sending data to + the backend, either NONE or PROXY_V1. The default is NONE. 
+ """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + port = proto.Field( + proto.INT32, + number=3446913, + optional=True, + ) + port_name = proto.Field( + proto.STRING, + number=41534345, + optional=True, + ) + port_specification = proto.Field( + proto.STRING, + number=51590597, + optional=True, + ) + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + request = proto.Field( + proto.STRING, + number=21951119, + optional=True, + ) + response = proto.Field( + proto.STRING, + number=196547649, + optional=True, + ) + + +class Tags(proto.Message): + r"""A set of instance tags. + + Attributes: + fingerprint (str): + Specifies a fingerprint for this request, + which is essentially a hash of the tags' + contents and used for optimistic locking. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update tags. You must always provide an up- + to-date fingerprint hash in order to update or + change tags. To see the latest fingerprint, make + get() request to the instance. + + This field is a member of `oneof`_ ``_fingerprint``. + items (Sequence[str]): + An array of tags. Each tag must be 1-63 + characters long, and comply with RFC1035. + """ + + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + items = proto.RepeatedField( + proto.STRING, + number=100526016, + ) + + +class TargetGrpcProxy(proto.Message): + r"""Represents a Target gRPC Proxy resource. A target gRPC proxy is a + component of load balancers intended for load balancing gRPC + traffic. Only global forwarding rules with load balancing scheme + INTERNAL_SELF_MANAGED can reference a target gRPC proxy. The target + gRPC Proxy references a URL map that specifies how traffic is routed + to gRPC backend services. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. 
+ + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a TargetGrpcProxy. An up- + to-date fingerprint must be provided in order to + patch/update the TargetGrpcProxy; otherwise, the + request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve the + TargetGrpcProxy. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] The unique identifier for the resource type. + The server generates this identifier. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#targetGrpcProxy for target grpc proxies. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + self_link_with_id (str): + [Output Only] Server-defined URL with id for the resource. + + This field is a member of `oneof`_ ``_self_link_with_id``. + url_map (str): + URL to the UrlMap resource that defines the + mapping from URL to the BackendService. 
The + protocol field in the BackendService must be set + to GRPC. + + This field is a member of `oneof`_ ``_url_map``. + validate_for_proxyless (bool): + If true, indicates that the BackendServices + referenced by the urlMap may be accessed by gRPC + applications without using a sidecar proxy. This + will enable configuration checks on urlMap and + its referenced BackendServices to not allow + unsupported features. A gRPC application must + use "xds:///" scheme in the target URI of the + service it is connecting to. If false, indicates + that the BackendServices referenced by the + urlMap will be accessed by gRPC applications via + a sidecar proxy. In this case, a gRPC + application must not use "xds:///" scheme in the + target URI of the service it is connecting to + + This field is a member of `oneof`_ ``_validate_for_proxyless``. + """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + self_link_with_id = proto.Field( + proto.STRING, + number=44520962, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + optional=True, + ) + validate_for_proxyless = proto.Field( + proto.BOOL, + number=101822888, + optional=True, + ) + + +class TargetGrpcProxyList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. 
+ items (Sequence[google.cloud.compute_v1.types.TargetGrpcProxy]): + A list of TargetGrpcProxy resources. + kind (str): + [Output Only] Type of the resource. Always + compute#targetGrpcProxy for target grpc proxies. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetGrpcProxy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetHttpProxiesScopedList(proto.Message): + r""" + + Attributes: + target_http_proxies (Sequence[google.cloud.compute_v1.types.TargetHttpProxy]): + A list of TargetHttpProxies contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of backend services when the list is empty. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + target_http_proxies = proto.RepeatedField( + proto.MESSAGE, + number=162147011, + message='TargetHttpProxy', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetHttpProxy(proto.Message): + r"""Represents a Target HTTP Proxy resource. Google Compute Engine has + two Target HTTP Proxy resources: \* + `Global `__ \* + `Regional `__ + A target HTTP proxy is a component of GCP HTTP load balancers. \* + targetHttpProxies are used by external HTTP load balancers and + Traffic Director. \* regionTargetHttpProxies are used by internal + HTTP load balancers. Forwarding rules reference a target HTTP proxy, + and the target proxy then references a URL map. For more + information, read Using Target Proxies and Forwarding rule concepts. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a TargetHttpProxy. An up- + to-date fingerprint must be provided in order to + patch/update the TargetHttpProxy; otherwise, the + request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve the + TargetHttpProxy. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. Always + compute#targetHttpProxy for target HTTP proxies. + + This field is a member of `oneof`_ ``_kind``. 
+ name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + proxy_bind (bool): + This field only applies when the forwarding rule that + references this target proxy has a loadBalancingScheme set + to INTERNAL_SELF_MANAGED. When this field is set to true, + Envoy proxies set up inbound traffic interception and bind + to the IP address and port specified in the forwarding rule. + This is generally useful when using Traffic Director to + configure Envoy as a gateway or middle proxy (in other + words, not a sidecar proxy). The Envoy proxy listens for + inbound requests and handles requests when it receives them. + The default is false. + + This field is a member of `oneof`_ ``_proxy_bind``. + region (str): + [Output Only] URL of the region where the regional Target + HTTP Proxy resides. This field is not applicable to global + Target HTTP Proxies. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + url_map (str): + URL to the UrlMap resource that defines the + mapping from URL to the BackendService. + + This field is a member of `oneof`_ ``_url_map``. 
+ """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + proxy_bind = proto.Field( + proto.BOOL, + number=286025582, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + optional=True, + ) + + +class TargetHttpProxyAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetHttpProxyAggregatedList.ItemsEntry]): + A list of TargetHttpProxiesScopedList + resources. + kind (str): + [Output Only] Type of resource. Always + compute#targetHttpProxyAggregatedList for lists of Target + HTTP Proxies. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='TargetHttpProxiesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + + +class TargetHttpProxyList(proto.Message): + r"""A list of TargetHttpProxy resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetHttpProxy]): + A list of TargetHttpProxy resources. + kind (str): + Type of resource. Always + compute#targetHttpProxyList for lists of target + HTTP proxies. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetHttpProxy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetHttpsProxiesScopedList(proto.Message): + r""" + + Attributes: + target_https_proxies (Sequence[google.cloud.compute_v1.types.TargetHttpsProxy]): + A list of TargetHttpsProxies contained in + this scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of backend services when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + target_https_proxies = proto.RepeatedField( + proto.MESSAGE, + number=366607882, + message='TargetHttpsProxy', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetHttpsProxiesSetQuicOverrideRequest(proto.Message): + r""" + + Attributes: + quic_override (str): + QUIC policy for the TargetHttpsProxy + resource. Check the QuicOverride enum for the + list of possible values. + + This field is a member of `oneof`_ ``_quic_override``. 
+ """ + class QuicOverride(proto.Enum): + r"""QUIC policy for the TargetHttpsProxy resource.""" + UNDEFINED_QUIC_OVERRIDE = 0 + DISABLE = 241807048 + ENABLE = 438835587 + NONE = 2402104 + + quic_override = proto.Field( + proto.STRING, + number=456577197, + optional=True, + ) + + +class TargetHttpsProxiesSetSslCertificatesRequest(proto.Message): + r""" + + Attributes: + ssl_certificates (Sequence[str]): + New set of SslCertificate resources to + associate with this TargetHttpsProxy resource. + At least one SSL certificate must be specified. + Currently, you may specify up to 15 SSL + certificates. + """ + + ssl_certificates = proto.RepeatedField( + proto.STRING, + number=366006543, + ) + + +class TargetHttpsProxy(proto.Message): + r"""Represents a Target HTTPS Proxy resource. Google Compute Engine has + two Target HTTPS Proxy resources: \* + `Global `__ \* + `Regional `__ + A target HTTPS proxy is a component of GCP HTTPS load balancers. \* + targetHttpsProxies are used by external HTTPS load balancers. \* + regionTargetHttpsProxies are used by internal HTTPS load balancers. + Forwarding rules reference a target HTTPS proxy, and the target + proxy then references a URL map. For more information, read Using + Target Proxies and Forwarding rule concepts. + + Attributes: + authorization_policy (str): + Optional. A URL referring to a + networksecurity.AuthorizationPolicy resource that describes + how the proxy should authorize inbound traffic. If left + blank, access will not be restricted by an authorization + policy. Refer to the AuthorizationPolicy resource for + additional details. authorizationPolicy only applies to a + global TargetHttpsProxy attached to globalForwardingRules + with the loadBalancingScheme set to INTERNAL_SELF_MANAGED. + Note: This field currently has no impact. + + This field is a member of `oneof`_ ``_authorization_policy``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. 
+ + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a TargetHttpsProxy. An + up-to-date fingerprint must be provided in order + to patch the TargetHttpsProxy; otherwise, the + request will fail with error 412 + conditionNotMet. To see the latest fingerprint, + make a get() request to retrieve the + TargetHttpsProxy. + + This field is a member of `oneof`_ ``_fingerprint``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. Always + compute#targetHttpsProxy for target HTTPS proxies. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + proxy_bind (bool): + This field only applies when the forwarding rule that + references this target proxy has a loadBalancingScheme set + to INTERNAL_SELF_MANAGED. When this field is set to true, + Envoy proxies set up inbound traffic interception and bind + to the IP address and port specified in the forwarding rule. 
+ This is generally useful when using Traffic Director to + configure Envoy as a gateway or middle proxy (in other + words, not a sidecar proxy). The Envoy proxy listens for + inbound requests and handles requests when it receives them. + The default is false. + + This field is a member of `oneof`_ ``_proxy_bind``. + quic_override (str): + Specifies the QUIC override policy for this + TargetHttpsProxy resource. This setting + determines whether the load balancer attempts to + negotiate QUIC with clients. You can specify + NONE, ENABLE, or DISABLE. - When quic-override + is set to NONE, Google manages whether QUIC is + used. - When quic-override is set to ENABLE, the + load balancer uses QUIC when possible. - When + quic-override is set to DISABLE, the load + balancer doesn't use QUIC. - If the quic- + override flag is not specified, NONE is implied. + Check the QuicOverride enum for the list of + possible values. + + This field is a member of `oneof`_ ``_quic_override``. + region (str): + [Output Only] URL of the region where the regional + TargetHttpsProxy resides. This field is not applicable to + global TargetHttpsProxies. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + server_tls_policy (str): + Optional. A URL referring to a + networksecurity.ServerTlsPolicy resource that describes how + the proxy should authenticate inbound traffic. + serverTlsPolicy only applies to a global TargetHttpsProxy + attached to globalForwardingRules with the + loadBalancingScheme set to INTERNAL_SELF_MANAGED. If left + blank, communications are not encrypted. Note: This field + currently has no impact. + + This field is a member of `oneof`_ ``_server_tls_policy``. + ssl_certificates (Sequence[str]): + URLs to SslCertificate resources that are used to + authenticate connections between users and the load + balancer. 
At least one SSL certificate must be specified. + Currently, you may specify up to 15 SSL certificates. + sslCertificates do not apply when the load balancing scheme + is set to INTERNAL_SELF_MANAGED. + ssl_policy (str): + URL of SslPolicy resource that will be + associated with the TargetHttpsProxy resource. + If not set, the TargetHttpsProxy resource has no + SSL policy configured. + + This field is a member of `oneof`_ ``_ssl_policy``. + url_map (str): + A fully-qualified or valid partial URL to the + UrlMap resource that defines the mapping from + URL to the BackendService. For example, the + following are all valid URLs for specifying a + URL map: - + https://www.googleapis.compute/v1/projects/project/global/urlMaps/ + url-map - projects/project/global/urlMaps/url- + map - global/urlMaps/url-map + + This field is a member of `oneof`_ ``_url_map``. + """ + class QuicOverride(proto.Enum): + r"""Specifies the QUIC override policy for this TargetHttpsProxy + resource. This setting determines whether the load balancer + attempts to negotiate QUIC with clients. You can specify NONE, + ENABLE, or DISABLE. - When quic-override is set to NONE, Google + manages whether QUIC is used. - When quic-override is set to + ENABLE, the load balancer uses QUIC when possible. - When quic- + override is set to DISABLE, the load balancer doesn't use QUIC. + - If the quic-override flag is not specified, NONE is implied. 
+ """ + UNDEFINED_QUIC_OVERRIDE = 0 + DISABLE = 241807048 + ENABLE = 438835587 + NONE = 2402104 + + authorization_policy = proto.Field( + proto.STRING, + number=33945528, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + proxy_bind = proto.Field( + proto.BOOL, + number=286025582, + optional=True, + ) + quic_override = proto.Field( + proto.STRING, + number=456577197, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + server_tls_policy = proto.Field( + proto.STRING, + number=295825266, + optional=True, + ) + ssl_certificates = proto.RepeatedField( + proto.STRING, + number=366006543, + ) + ssl_policy = proto.Field( + proto.STRING, + number=295190213, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + optional=True, + ) + + +class TargetHttpsProxyAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetHttpsProxyAggregatedList.ItemsEntry]): + A list of TargetHttpsProxiesScopedList + resources. + kind (str): + [Output Only] Type of resource. Always + compute#targetHttpsProxyAggregatedList for lists of Target + HTTP Proxies. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='TargetHttpsProxiesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetHttpsProxyList(proto.Message): + r"""Contains a list of TargetHttpsProxy resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetHttpsProxy]): + A list of TargetHttpsProxy resources. + kind (str): + Type of resource. Always + compute#targetHttpsProxyList for lists of target + HTTPS proxies. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetHttpsProxy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetInstance(proto.Message): + r"""Represents a Target Instance resource. You can use a target + instance to handle traffic for one or more forwarding rules, + which is ideal for forwarding protocol traffic that is managed + by a single source. For example, ESP, AH, TCP, or UDP. For more + information, read Target instances. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. 
+ id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + instance (str): + A URL to the virtual machine instance that + handles traffic for this target instance. When + creating a target instance, you can provide the + fully-qualified URL or a valid partial URL to + the desired virtual machine. For example, the + following are all valid URLs: - + https://www.googleapis.com/compute/v1/projects/project/zones/zone + /instances/instance - + projects/project/zones/zone/instances/instance - + zones/zone/instances/instance + + This field is a member of `oneof`_ ``_instance``. + kind (str): + [Output Only] The type of the resource. Always + compute#targetInstance for target instances. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + nat_policy (str): + NAT option controlling how IPs are NAT'ed to the instance. + Currently only NO_NAT (default value) is supported. Check + the NatPolicy enum for the list of possible values. + + This field is a member of `oneof`_ ``_nat_policy``. + network (str): + The URL of the network this target instance + uses to forward traffic. If not specified, the + traffic will be forwarded to the network that + the default network interface belongs to. + + This field is a member of `oneof`_ ``_network``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ zone (str): + [Output Only] URL of the zone where the target instance + resides. You must specify this field as part of the HTTP + request URL. It is not settable as a field in the request + body. + + This field is a member of `oneof`_ ``_zone``. + """ + class NatPolicy(proto.Enum): + r"""NAT option controlling how IPs are NAT'ed to the instance. Currently + only NO_NAT (default value) is supported. + """ + UNDEFINED_NAT_POLICY = 0 + NO_NAT = 161455491 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + instance = proto.Field( + proto.STRING, + number=18257045, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + nat_policy = proto.Field( + proto.STRING, + number=509780496, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + optional=True, + ) + + +class TargetInstanceAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetInstanceAggregatedList.ItemsEntry]): + A list of TargetInstance resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='TargetInstancesScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetInstanceList(proto.Message): + r"""Contains a list of TargetInstance resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetInstance]): + A list of TargetInstance resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetInstance', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetInstancesScopedList(proto.Message): + r""" + + Attributes: + target_instances (Sequence[google.cloud.compute_v1.types.TargetInstance]): + A list of target instances contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of addresses when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + target_instances = proto.RepeatedField( + proto.MESSAGE, + number=392915280, + message='TargetInstance', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetPool(proto.Message): + r"""Represents a Target Pool resource. Target pools are used for + network TCP/UDP load balancing. A target pool references member + instances, an associated legacy HttpHealthCheck resource, and, + optionally, a backup target pool. For more information, read + Using target pools. 
+ + Attributes: + backup_pool (str): + The server-defined URL for the resource. This field is + applicable only when the containing target pool is serving a + forwarding rule as the primary pool, and its failoverRatio + field is properly set to a value between [0, 1]. backupPool + and failoverRatio together define the fallback behavior of + the primary target pool: if the ratio of the healthy + instances in the primary pool is at or below failoverRatio, + traffic arriving at the load-balanced IP will be directed to + the backup pool. In case where failoverRatio and backupPool + are not set, or all the instances in the backup pool are + unhealthy, the traffic will be directed back to the primary + pool in the "force" mode, where traffic will be spread to + the healthy instances with the best effort, or to all + instances when no instance is healthy. + + This field is a member of `oneof`_ ``_backup_pool``. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + failover_ratio (float): + This field is applicable only when the containing target + pool is serving a forwarding rule as the primary pool (i.e., + not as a backup pool to some other target pool). The value + of the field must be in [0, 1]. If set, backupPool must also + be set. They together define the fallback behavior of the + primary target pool: if the ratio of the healthy instances + in the primary pool is at or below this number, traffic + arriving at the load-balanced IP will be directed to the + backup pool. 
In case where failoverRatio is not set or all + the instances in the backup pool are unhealthy, the traffic + will be directed back to the primary pool in the "force" + mode, where traffic will be spread to the healthy instances + with the best effort, or to all instances when no instance + is healthy. + + This field is a member of `oneof`_ ``_failover_ratio``. + health_checks (Sequence[str]): + The URL of the HttpHealthCheck resource. A + member instance in this pool is considered + healthy if and only if the health checks pass. + Only legacy HttpHealthChecks are supported. Only + one health check may be specified. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + instances (Sequence[str]): + A list of resource URLs to the virtual + machine instances serving this pool. They must + live in zones contained in the same region as + this pool. + kind (str): + [Output Only] Type of the resource. Always + compute#targetPool for target pools. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + region (str): + [Output Only] URL of the region where the target pool + resides. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. 
+ session_affinity (str): + Session affinity option, must be one of the following + values: NONE: Connections from the same client IP may go to + any instance in the pool. CLIENT_IP: Connections from the + same client IP will go to the same instance in the pool + while that instance remains healthy. CLIENT_IP_PROTO: + Connections from the same client IP with the same IP + protocol will go to the same instance in the pool while that + instance remains healthy. Check the SessionAffinity enum for + the list of possible values. + + This field is a member of `oneof`_ ``_session_affinity``. + """ + class SessionAffinity(proto.Enum): + r"""Session affinity option, must be one of the following values: NONE: + Connections from the same client IP may go to any instance in the + pool. CLIENT_IP: Connections from the same client IP will go to the + same instance in the pool while that instance remains healthy. + CLIENT_IP_PROTO: Connections from the same client IP with the same + IP protocol will go to the same instance in the pool while that + instance remains healthy. 
+ """ + UNDEFINED_SESSION_AFFINITY = 0 + CLIENT_IP = 345665051 + CLIENT_IP_NO_DESTINATION = 106122516 + CLIENT_IP_PORT_PROTO = 221722926 + CLIENT_IP_PROTO = 25322148 + GENERATED_COOKIE = 370321204 + HEADER_FIELD = 200737960 + HTTP_COOKIE = 494981627 + NONE = 2402104 + + backup_pool = proto.Field( + proto.STRING, + number=45884537, + optional=True, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + failover_ratio = proto.Field( + proto.FLOAT, + number=212667006, + optional=True, + ) + health_checks = proto.RepeatedField( + proto.STRING, + number=448370606, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + instances = proto.RepeatedField( + proto.STRING, + number=29097598, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + session_affinity = proto.Field( + proto.STRING, + number=463888561, + optional=True, + ) + + +class TargetPoolAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetPoolAggregatedList.ItemsEntry]): + A list of TargetPool resources. + kind (str): + [Output Only] Type of resource. Always + compute#targetPoolAggregatedList for aggregated lists of + target pools. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. 
If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='TargetPoolsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetPoolInstanceHealth(proto.Message): + r""" + + Attributes: + health_status (Sequence[google.cloud.compute_v1.types.HealthStatus]): + + kind (str): + [Output Only] Type of resource. Always + compute#targetPoolInstanceHealth when checking the health of + an instance. + + This field is a member of `oneof`_ ``_kind``. + """ + + health_status = proto.RepeatedField( + proto.MESSAGE, + number=380545845, + message='HealthStatus', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + + +class TargetPoolList(proto.Message): + r"""Contains a list of TargetPool resources. 
+ + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetPool]): + A list of TargetPool resources. + kind (str): + [Output Only] Type of resource. Always + compute#targetPoolList for lists of target pools. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetPool', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetPoolsAddHealthCheckRequest(proto.Message): + r""" + + Attributes: + health_checks (Sequence[google.cloud.compute_v1.types.HealthCheckReference]): + The HttpHealthCheck to add to the target + pool. 
+ """ + + health_checks = proto.RepeatedField( + proto.MESSAGE, + number=448370606, + message='HealthCheckReference', + ) + + +class TargetPoolsAddInstanceRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.InstanceReference]): + A full or partial URL to an instance to add + to this target pool. This can be a full or + partial URL. For example, the following are + valid URLs: - + https://www.googleapis.com/compute/v1/projects/project- + id/zones/zone /instances/instance-name - + projects/project- + id/zones/zone/instances/instance-name - + zones/zone/instances/instance-name + """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='InstanceReference', + ) + + +class TargetPoolsRemoveHealthCheckRequest(proto.Message): + r""" + + Attributes: + health_checks (Sequence[google.cloud.compute_v1.types.HealthCheckReference]): + Health check URL to be removed. This can be a + full or valid partial URL. For example, the + following are valid URLs: - + https://www.googleapis.com/compute/beta/projects/project + /global/httpHealthChecks/health-check - + projects/project/global/httpHealthChecks/health- + check - global/httpHealthChecks/health-check + """ + + health_checks = proto.RepeatedField( + proto.MESSAGE, + number=448370606, + message='HealthCheckReference', + ) + + +class TargetPoolsRemoveInstanceRequest(proto.Message): + r""" + + Attributes: + instances (Sequence[google.cloud.compute_v1.types.InstanceReference]): + URLs of the instances to be removed from + target pool. + """ + + instances = proto.RepeatedField( + proto.MESSAGE, + number=29097598, + message='InstanceReference', + ) + + +class TargetPoolsScopedList(proto.Message): + r""" + + Attributes: + target_pools (Sequence[google.cloud.compute_v1.types.TargetPool]): + A list of target pools contained in this + scope. 
+ warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of addresses when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + target_pools = proto.RepeatedField( + proto.MESSAGE, + number=336072617, + message='TargetPool', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetReference(proto.Message): + r""" + + Attributes: + target (str): + + This field is a member of `oneof`_ ``_target``. + """ + + target = proto.Field( + proto.STRING, + number=192835985, + optional=True, + ) + + +class TargetSslProxiesSetBackendServiceRequest(proto.Message): + r""" + + Attributes: + service (str): + The URL of the new BackendService resource + for the targetSslProxy. + + This field is a member of `oneof`_ ``_service``. + """ + + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + + +class TargetSslProxiesSetProxyHeaderRequest(proto.Message): + r""" + + Attributes: + proxy_header (str): + The new type of proxy header to append before sending data + to the backend. NONE or PROXY_V1 are allowed. Check the + ProxyHeader enum for the list of possible values. + + This field is a member of `oneof`_ ``_proxy_header``. + """ + class ProxyHeader(proto.Enum): + r"""The new type of proxy header to append before sending data to the + backend. NONE or PROXY_V1 are allowed. + """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + + +class TargetSslProxiesSetSslCertificatesRequest(proto.Message): + r""" + + Attributes: + ssl_certificates (Sequence[str]): + New set of URLs to SslCertificate resources + to associate with this TargetSslProxy. At least + one SSL certificate must be specified. + Currently, you may specify up to 15 SSL + certificates. 
+ """ + + ssl_certificates = proto.RepeatedField( + proto.STRING, + number=366006543, + ) + + +class TargetSslProxy(proto.Message): + r"""Represents a Target SSL Proxy resource. A target SSL proxy is + a component of a SSL Proxy load balancer. Global forwarding + rules reference a target SSL proxy, and the target proxy then + references an external backend service. For more information, + read Using Target Proxies. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#targetSslProxy for target SSL proxies. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + proxy_header (str): + Specifies the type of proxy header to append before sending + data to the backend, either NONE or PROXY_V1. The default is + NONE. Check the ProxyHeader enum for the list of possible + values. + + This field is a member of `oneof`_ ``_proxy_header``. + self_link (str): + [Output Only] Server-defined URL for the resource. 
+ + This field is a member of `oneof`_ ``_self_link``. + service (str): + URL to the BackendService resource. + + This field is a member of `oneof`_ ``_service``. + ssl_certificates (Sequence[str]): + URLs to SslCertificate resources that are used to + authenticate connections to Backends. At least one SSL + certificate must be specified. Currently, you may specify up + to 15 SSL certificates. sslCertificates do not apply when + the load balancing scheme is set to INTERNAL_SELF_MANAGED. + ssl_policy (str): + URL of SslPolicy resource that will be + associated with the TargetSslProxy resource. If + not set, the TargetSslProxy resource will not + have any SSL policy configured. + + This field is a member of `oneof`_ ``_ssl_policy``. + """ + class ProxyHeader(proto.Enum): + r"""Specifies the type of proxy header to append before sending data to + the backend, either NONE or PROXY_V1. The default is NONE. + """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + ssl_certificates = proto.RepeatedField( + proto.STRING, + number=366006543, + ) + ssl_policy = proto.Field( + proto.STRING, + number=295190213, + optional=True, + ) + + +class TargetSslProxyList(proto.Message): + r"""Contains a list of TargetSslProxy resources. 
+ + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetSslProxy]): + A list of TargetSslProxy resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetSslProxy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetTcpProxiesSetBackendServiceRequest(proto.Message): + r""" + + Attributes: + service (str): + The URL of the new BackendService resource + for the targetTcpProxy. + + This field is a member of `oneof`_ ``_service``. 
+ """ + + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + + +class TargetTcpProxiesSetProxyHeaderRequest(proto.Message): + r""" + + Attributes: + proxy_header (str): + The new type of proxy header to append before sending data + to the backend. NONE or PROXY_V1 are allowed. Check the + ProxyHeader enum for the list of possible values. + + This field is a member of `oneof`_ ``_proxy_header``. + """ + class ProxyHeader(proto.Enum): + r"""The new type of proxy header to append before sending data to the + backend. NONE or PROXY_V1 are allowed. + """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + + +class TargetTcpProxy(proto.Message): + r"""Represents a Target TCP Proxy resource. A target TCP proxy is + a component of a TCP Proxy load balancer. Global forwarding + rules reference target TCP proxy, and the target proxy then + references an external backend service. For more information, + read TCP Proxy Load Balancing overview. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always + compute#targetTcpProxy for target TCP proxies. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. 
Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + proxy_bind (bool): + This field only applies when the forwarding rule that + references this target proxy has a loadBalancingScheme set + to INTERNAL_SELF_MANAGED. When this field is set to true, + Envoy proxies set up inbound traffic interception and bind + to the IP address and port specified in the forwarding rule. + This is generally useful when using Traffic Director to + configure Envoy as a gateway or middle proxy (in other + words, not a sidecar proxy). The Envoy proxy listens for + inbound requests and handles requests when it receives them. + The default is false. + + This field is a member of `oneof`_ ``_proxy_bind``. + proxy_header (str): + Specifies the type of proxy header to append before sending + data to the backend, either NONE or PROXY_V1. The default is + NONE. Check the ProxyHeader enum for the list of possible + values. + + This field is a member of `oneof`_ ``_proxy_header``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + service (str): + URL to the BackendService resource. + + This field is a member of `oneof`_ ``_service``. + """ + class ProxyHeader(proto.Enum): + r"""Specifies the type of proxy header to append before sending data to + the backend, either NONE or PROXY_V1. The default is NONE. 
+ """ + UNDEFINED_PROXY_HEADER = 0 + NONE = 2402104 + PROXY_V1 = 334352940 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + proxy_bind = proto.Field( + proto.BOOL, + number=286025582, + optional=True, + ) + proxy_header = proto.Field( + proto.STRING, + number=160374142, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + + +class TargetTcpProxyList(proto.Message): + r"""Contains a list of TargetTcpProxy resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetTcpProxy]): + A list of TargetTcpProxy resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetTcpProxy', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetVpnGateway(proto.Message): + r"""Represents a Target VPN Gateway resource. The target VPN + gateway resource represents a Classic Cloud VPN gateway. For + more information, read the the Cloud VPN Overview. + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + forwarding_rules (Sequence[str]): + [Output Only] A list of URLs to the ForwardingRule + resources. ForwardingRules are created using + compute.forwardingRules.insert and associated with a VPN + gateway. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. Always + compute#targetVpnGateway for target VPN gateways. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. 
Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network (str): + URL of the network to which this VPN gateway + is attached. Provided by the client when the VPN + gateway is created. + + This field is a member of `oneof`_ ``_network``. + region (str): + [Output Only] URL of the region where the target VPN gateway + resides. You must specify this field as part of the HTTP + request URL. It is not settable as a field in the request + body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + status (str): + [Output Only] The status of the VPN gateway, which can be + one of the following: CREATING, READY, FAILED, or DELETING. + Check the Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + tunnels (Sequence[str]): + [Output Only] A list of URLs to VpnTunnel resources. + VpnTunnels are created using the compute.vpntunnels.insert + method and associated with a VPN gateway. + """ + class Status(proto.Enum): + r"""[Output Only] The status of the VPN gateway, which can be one of the + following: CREATING, READY, FAILED, or DELETING. 
+ """ + UNDEFINED_STATUS = 0 + CREATING = 455564985 + DELETING = 528602024 + FAILED = 455706685 + READY = 77848963 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + forwarding_rules = proto.RepeatedField( + proto.STRING, + number=315821365, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + tunnels = proto.RepeatedField( + proto.STRING, + number=104561931, + ) + + +class TargetVpnGatewayAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetVpnGatewayAggregatedList.ItemsEntry]): + A list of TargetVpnGateway resources. + kind (str): + [Output Only] Type of resource. Always + compute#targetVpnGateway for target VPN gateways. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='TargetVpnGatewaysScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetVpnGatewayList(proto.Message): + r"""Contains a list of TargetVpnGateway resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.TargetVpnGateway]): + A list of TargetVpnGateway resources. + kind (str): + [Output Only] Type of resource. Always + compute#targetVpnGateway for target VPN gateways. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='TargetVpnGateway', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TargetVpnGatewaysScopedList(proto.Message): + r""" + + Attributes: + target_vpn_gateways (Sequence[google.cloud.compute_v1.types.TargetVpnGateway]): + [Output Only] A list of target VPN gateways contained in + this scope. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning which replaces the list + of addresses when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + target_vpn_gateways = proto.RepeatedField( + proto.MESSAGE, + number=401770888, + message='TargetVpnGateway', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class TestFailure(proto.Message): + r""" + + Attributes: + actual_output_url (str): + The actual output URL evaluated by load + balancer containing the scheme, host, path and + query parameters. + + This field is a member of `oneof`_ ``_actual_output_url``. + actual_redirect_response_code (int): + Actual HTTP status code for rule with ``urlRedirect`` + calculated by load balancer + + This field is a member of `oneof`_ ``_actual_redirect_response_code``. 
+ actual_service (str): + BackendService or BackendBucket returned by + load balancer. + + This field is a member of `oneof`_ ``_actual_service``. + expected_output_url (str): + The expected output URL evaluated by load + balancer containing the scheme, host, path and + query parameters. + + This field is a member of `oneof`_ ``_expected_output_url``. + expected_redirect_response_code (int): + Expected HTTP status code for rule with ``urlRedirect`` + calculated by load balancer + + This field is a member of `oneof`_ ``_expected_redirect_response_code``. + expected_service (str): + Expected BackendService or BackendBucket + resource the given URL should be mapped to. + + This field is a member of `oneof`_ ``_expected_service``. + headers (Sequence[google.cloud.compute_v1.types.UrlMapTestHeader]): + HTTP headers of the request. + host (str): + Host portion of the URL. + + This field is a member of `oneof`_ ``_host``. + path (str): + Path portion including query parameters in + the URL. + + This field is a member of `oneof`_ ``_path``. + """ + + actual_output_url = proto.Field( + proto.STRING, + number=287075458, + optional=True, + ) + actual_redirect_response_code = proto.Field( + proto.INT32, + number=42926553, + optional=True, + ) + actual_service = proto.Field( + proto.STRING, + number=440379652, + optional=True, + ) + expected_output_url = proto.Field( + proto.STRING, + number=433967384, + optional=True, + ) + expected_redirect_response_code = proto.Field( + proto.INT32, + number=18888047, + optional=True, + ) + expected_service = proto.Field( + proto.STRING, + number=133987374, + optional=True, + ) + headers = proto.RepeatedField( + proto.MESSAGE, + number=258436998, + message='UrlMapTestHeader', + ) + host = proto.Field( + proto.STRING, + number=3208616, + optional=True, + ) + path = proto.Field( + proto.STRING, + number=3433509, + optional=True, + ) + + +class TestIamPermissionsDiskRequest(proto.Message): + r"""A request message for Disks.TestIamPermissions. 
See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + zone (str): + The name of the zone for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class TestIamPermissionsExternalVpnGatewayRequest(proto.Message): + r"""A request message for ExternalVpnGateways.TestIamPermissions. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsFirewallPolicyRequest(proto.Message): + r"""A request message for FirewallPolicies.TestIamPermissions. + See the method description for details. + + Attributes: + resource (str): + Name or id of the resource for this request. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsImageRequest(proto.Message): + r"""A request message for Images.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsInstanceRequest(proto.Message): + r"""A request message for Instances.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + zone (str): + The name of the zone for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class TestIamPermissionsInstanceTemplateRequest(proto.Message): + r"""A request message for InstanceTemplates.TestIamPermissions. + See the method description for details. 
+ + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsLicenseCodeRequest(proto.Message): + r"""A request message for LicenseCodes.TestIamPermissions. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsLicenseRequest(proto.Message): + r"""A request message for Licenses.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsNetworkEndpointGroupRequest(proto.Message): + r"""A request message for + NetworkEndpointGroups.TestIamPermissions. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + zone (str): + The name of the zone for this request. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class TestIamPermissionsNodeGroupRequest(proto.Message): + r"""A request message for NodeGroups.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + zone (str): + The name of the zone for this request. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class TestIamPermissionsNodeTemplateRequest(proto.Message): + r"""A request message for NodeTemplates.TestIamPermissions. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsPacketMirroringRequest(proto.Message): + r"""A request message for PacketMirrorings.TestIamPermissions. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsRegionDiskRequest(proto.Message): + r"""A request message for RegionDisks.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsReservationRequest(proto.Message): + r"""A request message for Reservations.TestIamPermissions. See + the method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + zone (str): + The name of the zone for this request. 
+ """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class TestIamPermissionsResourcePolicyRequest(proto.Message): + r"""A request message for ResourcePolicies.TestIamPermissions. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsServiceAttachmentRequest(proto.Message): + r"""A request message for ServiceAttachments.TestIamPermissions. + See the method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsSnapshotRequest(proto.Message): + r"""A request message for Snapshots.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsSubnetworkRequest(proto.Message): + r"""A request message for Subnetworks.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. 
+ test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestIamPermissionsVpnGatewayRequest(proto.Message): + r"""A request message for VpnGateways.TestIamPermissions. See the + method description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + The name of the region for this request. + resource (str): + Name or id of the resource for this request. + test_permissions_request_resource (google.cloud.compute_v1.types.TestPermissionsRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + resource = proto.Field( + proto.STRING, + number=195806222, + ) + test_permissions_request_resource = proto.Field( + proto.MESSAGE, + number=439214758, + message='TestPermissionsRequest', + ) + + +class TestPermissionsRequest(proto.Message): + r""" + + Attributes: + permissions (Sequence[str]): + The set of permissions to check for the 'resource'. + Permissions with wildcards (such as '*' or 'storage.*') are + not allowed. + """ + + permissions = proto.RepeatedField( + proto.STRING, + number=59962500, + ) + + +class TestPermissionsResponse(proto.Message): + r""" + + Attributes: + permissions (Sequence[str]): + A subset of ``TestPermissionsRequest.permissions`` that the + caller is allowed. + """ + + permissions = proto.RepeatedField( + proto.STRING, + number=59962500, + ) + + +class Uint128(proto.Message): + r""" + + Attributes: + high (int): + + This field is a member of `oneof`_ ``_high``. 
+ low (int): + + This field is a member of `oneof`_ ``_low``. + """ + + high = proto.Field( + proto.UINT64, + number=3202466, + optional=True, + ) + low = proto.Field( + proto.UINT64, + number=107348, + optional=True, + ) + + +class UpdateAccessConfigInstanceRequest(proto.Message): + r"""A request message for Instances.UpdateAccessConfig. See the + method description for details. + + Attributes: + access_config_resource (google.cloud.compute_v1.types.AccessConfig): + The body resource for this request + instance (str): + The instance name for this request. + network_interface (str): + The name of the network interface where the + access config is attached. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. 
+ """ + + access_config_resource = proto.Field( + proto.MESSAGE, + number=387825552, + message='AccessConfig', + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + network_interface = proto.Field( + proto.STRING, + number=365387880, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class UpdateAutoscalerRequest(proto.Message): + r"""A request message for Autoscalers.Update. See the method + description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to update. + + This field is a member of `oneof`_ ``_autoscaler``. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + Name of the zone for this request. 
+ """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + optional=True, + ) + autoscaler_resource = proto.Field( + proto.MESSAGE, + number=207616118, + message='Autoscaler', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class UpdateBackendBucketRequest(proto.Message): + r"""A request message for BackendBuckets.Update. See the method + description for details. + + Attributes: + backend_bucket (str): + Name of the BackendBucket resource to update. + backend_bucket_resource (google.cloud.compute_v1.types.BackendBucket): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + backend_bucket = proto.Field( + proto.STRING, + number=91714037, + ) + backend_bucket_resource = proto.Field( + proto.MESSAGE, + number=380757784, + message='BackendBucket', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateBackendServiceRequest(proto.Message): + r"""A request message for BackendServices.Update. See the method + description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to + update. + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + backend_service_resource = proto.Field( + proto.MESSAGE, + number=347586723, + message='BackendService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateDisplayDeviceInstanceRequest(proto.Message): + r"""A request message for Instances.UpdateDisplayDevice. 
See the + method description for details. + + Attributes: + display_device_resource (google.cloud.compute_v1.types.DisplayDevice): + The body resource for this request + instance (str): + Name of the instance scoping this request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + display_device_resource = proto.Field( + proto.MESSAGE, + number=289686106, + message='DisplayDevice', + ) + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class UpdateFirewallRequest(proto.Message): + r"""A request message for Firewalls.Update. See the method + description for details. + + Attributes: + firewall (str): + Name of the firewall rule to update. + firewall_resource (google.cloud.compute_v1.types.Firewall): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. 
+ Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + firewall = proto.Field( + proto.STRING, + number=511016192, + ) + firewall_resource = proto.Field( + proto.MESSAGE, + number=41425005, + message='Firewall', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateHealthCheckRequest(proto.Message): + r"""A request message for HealthChecks.Update. See the method + description for details. + + Attributes: + health_check (str): + Name of the HealthCheck resource to update. + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + health_check_resource = proto.Field( + proto.MESSAGE, + number=201925032, + message='HealthCheck', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateInstanceRequest(proto.Message): + r"""A request message for Instances.Update. See the method + description for details. + + Attributes: + instance (str): + Name of the instance resource to update. + instance_resource (google.cloud.compute_v1.types.Instance): + The body resource for this request + minimal_action (str): + Specifies the action to take when updating an + instance even if the updated properties do not + require it. If not specified, then Compute + Engine acts based on the minimum action that the + updated properties require. + + This field is a member of `oneof`_ ``_minimal_action``. + most_disruptive_allowed_action (str): + Specifies the most disruptive action that can be taken on + the instance as part of the update. Compute Engine returns + an error if the instance properties require a more + disruptive action as part of the instance update. Valid + options from lowest to highest are NO_EFFECT, REFRESH, and + RESTART. + + This field is a member of `oneof`_ ``_most_disruptive_allowed_action``. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + instance_resource = proto.Field( + proto.MESSAGE, + number=215988344, + message='Instance', + ) + minimal_action = proto.Field( + proto.STRING, + number=270567060, + optional=True, + ) + most_disruptive_allowed_action = proto.Field( + proto.STRING, + number=66103053, + optional=True, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class UpdateNetworkInterfaceInstanceRequest(proto.Message): + r"""A request message for Instances.UpdateNetworkInterface. See + the method description for details. + + Attributes: + instance (str): + The instance name for this request. + network_interface (str): + The name of the network interface to update. + network_interface_resource (google.cloud.compute_v1.types.NetworkInterface): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + network_interface = proto.Field( + proto.STRING, + number=365387880, + ) + network_interface_resource = proto.Field( + proto.MESSAGE, + number=325814789, + message='NetworkInterface', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class UpdatePeeringNetworkRequest(proto.Message): + r"""A request message for Networks.UpdatePeering. See the method + description for details. + + Attributes: + network (str): + Name of the network resource which the + updated peering is belonging to. + networks_update_peering_request_resource (google.cloud.compute_v1.types.NetworksUpdatePeeringRequest): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. 
This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + """ + + network = proto.Field( + proto.STRING, + number=232872494, + ) + networks_update_peering_request_resource = proto.Field( + proto.MESSAGE, + number=224433497, + message='NetworksUpdatePeeringRequest', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdatePerInstanceConfigsInstanceGroupManagerRequest(proto.Message): + r"""A request message for + InstanceGroupManagers.UpdatePerInstanceConfigs. See the method + description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + instance_group_managers_update_per_instance_configs_req_resource (google.cloud.compute_v1.types.InstanceGroupManagersUpdatePerInstanceConfigsReq): + The body resource for this request + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ zone (str): + The name of the zone where the managed + instance group is located. It should conform to + RFC1035. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + instance_group_managers_update_per_instance_configs_req_resource = proto.Field( + proto.MESSAGE, + number=141402302, + message='InstanceGroupManagersUpdatePerInstanceConfigsReq', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest(proto.Message): + r"""A request message for + RegionInstanceGroupManagers.UpdatePerInstanceConfigs. See the + method description for details. + + Attributes: + instance_group_manager (str): + The name of the managed instance group. It + should conform to RFC1035. + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request, + should conform to RFC1035. + region_instance_group_manager_update_instance_config_req_resource (google.cloud.compute_v1.types.RegionInstanceGroupManagerUpdateInstanceConfigReq): + The body resource for this request + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). 
+ + This field is a member of `oneof`_ ``_request_id``. + """ + + instance_group_manager = proto.Field( + proto.STRING, + number=249363395, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_instance_group_manager_update_instance_config_req_resource = proto.Field( + proto.MESSAGE, + number=89036583, + message='RegionInstanceGroupManagerUpdateInstanceConfigReq', + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateRegionAutoscalerRequest(proto.Message): + r"""A request message for RegionAutoscalers.Update. See the + method description for details. + + Attributes: + autoscaler (str): + Name of the autoscaler to update. + + This field is a member of `oneof`_ ``_autoscaler``. + autoscaler_resource (google.cloud.compute_v1.types.Autoscaler): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + autoscaler = proto.Field( + proto.STRING, + number=517258967, + optional=True, + ) + autoscaler_resource = proto.Field( + proto.MESSAGE, + number=207616118, + message='Autoscaler', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateRegionBackendServiceRequest(proto.Message): + r"""A request message for RegionBackendServices.Update. See the + method description for details. + + Attributes: + backend_service (str): + Name of the BackendService resource to + update. + backend_service_resource (google.cloud.compute_v1.types.BackendService): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + ) + backend_service_resource = proto.Field( + proto.MESSAGE, + number=347586723, + message='BackendService', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateRegionHealthCheckRequest(proto.Message): + r"""A request message for RegionHealthChecks.Update. See the + method description for details. + + Attributes: + health_check (str): + Name of the HealthCheck resource to update. + health_check_resource (google.cloud.compute_v1.types.HealthCheck): + The body resource for this request + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. 
+ """ + + health_check = proto.Field( + proto.STRING, + number=308876645, + ) + health_check_resource = proto.Field( + proto.MESSAGE, + number=201925032, + message='HealthCheck', + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + + +class UpdateRegionUrlMapRequest(proto.Message): + r"""A request message for RegionUrlMaps.Update. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + request_id (str): + begin_interface: MixerMutationRequestBuilder Request ID to + support idempotency. + + This field is a member of `oneof`_ ``_request_id``. + url_map (str): + Name of the UrlMap resource to update. + url_map_resource (google.cloud.compute_v1.types.UrlMap): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + url_map_resource = proto.Field( + proto.MESSAGE, + number=168675425, + message='UrlMap', + ) + + +class UpdateRouterRequest(proto.Message): + r"""A request message for Routers.Update. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. 
If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + router (str): + Name of the Router resource to update. + router_resource (google.cloud.compute_v1.types.Router): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + router = proto.Field( + proto.STRING, + number=148608841, + ) + router_resource = proto.Field( + proto.MESSAGE, + number=155222084, + message='Router', + ) + + +class UpdateShieldedInstanceConfigInstanceRequest(proto.Message): + r"""A request message for Instances.UpdateShieldedInstanceConfig. + See the method description for details. + + Attributes: + instance (str): + Name or id of the instance scoping this + request. + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. 
The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + shielded_instance_config_resource (google.cloud.compute_v1.types.ShieldedInstanceConfig): + The body resource for this request + zone (str): + The name of the zone for this request. + """ + + instance = proto.Field( + proto.STRING, + number=18257045, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + request_id = proto.Field( + proto.STRING, + number=37109963, + optional=True, + ) + shielded_instance_config_resource = proto.Field( + proto.MESSAGE, + number=272059224, + message='ShieldedInstanceConfig', + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class UpdateUrlMapRequest(proto.Message): + r"""A request message for UrlMaps.Update. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + request_id (str): + An optional request ID to identify requests. + Specify a unique request ID so that if you must + retry your request, the server will know to + ignore the request if it has already been + completed. For example, consider a situation + where you make an initial request and the + request times out. If you make the request again + with the same request ID, the server can check + if original operation with the same request ID + was received, and if so, will ignore the second + request. This prevents clients from accidentally + creating duplicate commitments. The request ID + must be a valid UUID with the exception that + zero UUID is not supported ( + 00000000-0000-0000-0000-000000000000). + + This field is a member of `oneof`_ ``_request_id``. + url_map (str): + Name of the UrlMap resource to update. 
+        url_map_resource (google.cloud.compute_v1.types.UrlMap):
+            The body resource for this request
+    """
+
+    project = proto.Field(
+        proto.STRING,
+        number=227560217,
+    )
+    request_id = proto.Field(
+        proto.STRING,
+        number=37109963,
+        optional=True,
+    )
+    url_map = proto.Field(
+        proto.STRING,
+        number=367020684,
+    )
+    url_map_resource = proto.Field(
+        proto.MESSAGE,
+        number=168675425,
+        message='UrlMap',
+    )
+
+
+class UrlMap(proto.Message):
+    r"""Represents a URL Map resource. Google Compute Engine has two URL Map
+    resources: \* `Global <https://cloud.google.com/compute/docs/reference/rest/v1/urlMaps>`__
+    \* `Regional <https://cloud.google.com/compute/docs/reference/rest/v1/regionUrlMaps>`__ A
+    URL map resource is a component of certain types of GCP load
+    balancers and Traffic Director. \* urlMaps are used by external
+    HTTP(S) load balancers and Traffic Director. \* regionUrlMaps are
+    used by internal HTTP(S) load balancers. For a list of supported URL
+    map features by load balancer type, see the Load balancing features:
+    Routing and traffic management table. For a list of supported URL
+    map features for Traffic Director, see the Traffic Director
+    features: Routing and traffic management table. This resource
+    defines mappings from host names and URL paths to either a backend
+    service or a backend bucket. To use the global urlMaps resource, the
+    backend service must have a loadBalancingScheme of either EXTERNAL
+    or INTERNAL_SELF_MANAGED. To use the regionUrlMaps resource, the
+    backend service must have a loadBalancingScheme of INTERNAL_MANAGED.
+    For more information, read URL Map Concepts.
+
+    Attributes:
+        creation_timestamp (str):
+            [Output Only] Creation timestamp in RFC3339 text format.
+
+            This field is a member of `oneof`_ ``_creation_timestamp``.
+        default_route_action (google.cloud.compute_v1.types.HttpRouteAction):
+            defaultRouteAction takes effect when none of
+            the hostRules match. The load balancer performs
+            advanced routing actions like URL rewrites,
+            header transformations, etc. prior to forwarding
+            the request to the selected backend.
If + defaultRouteAction specifies any + weightedBackendServices, defaultService must not + be set. Conversely if defaultService is set, + defaultRouteAction cannot contain any + weightedBackendServices. Only one of + defaultRouteAction or defaultUrlRedirect must be + set. UrlMaps for external HTTP(S) load balancers + support only the urlRewrite action within + defaultRouteAction. defaultRouteAction has no + effect when the URL map is bound to target gRPC + proxy that has validateForProxyless field set to + true. + + This field is a member of `oneof`_ ``_default_route_action``. + default_service (str): + The full or partial URL of the defaultService + resource to which traffic is directed if none of + the hostRules match. If defaultRouteAction is + additionally specified, advanced routing actions + like URL Rewrites, etc. take effect prior to + sending the request to the backend. However, if + defaultService is specified, defaultRouteAction + cannot contain any weightedBackendServices. + Conversely, if routeAction specifies any + weightedBackendServices, service must not be + specified. Only one of defaultService, + defaultUrlRedirect or + defaultRouteAction.weightedBackendService must + be set. defaultService has no effect when the + URL map is bound to target gRPC proxy that has + validateForProxyless field set to true. + + This field is a member of `oneof`_ ``_default_service``. + default_url_redirect (google.cloud.compute_v1.types.HttpRedirectAction): + When none of the specified hostRules match, + the request is redirected to a URL specified by + defaultUrlRedirect. If defaultUrlRedirect is + specified, defaultService or defaultRouteAction + must not be set. Not supported when the URL map + is bound to target gRPC proxy. + + This field is a member of `oneof`_ ``_default_url_redirect``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. 
+ fingerprint (str): + Fingerprint of this resource. A hash of the + contents stored in this object. This field is + used in optimistic locking. This field will be + ignored when inserting a UrlMap. An up-to-date + fingerprint must be provided in order to update + the UrlMap, otherwise the request will fail with + error 412 conditionNotMet. To see the latest + fingerprint, make a get() request to retrieve a + UrlMap. + + This field is a member of `oneof`_ ``_fingerprint``. + header_action (google.cloud.compute_v1.types.HttpHeaderAction): + Specifies changes to request and response + headers that need to take effect for the + selected backendService. The headerAction + specified here take effect after headerAction + specified under pathMatcher. Note that + headerAction is not supported for Loadbalancers + that have their loadBalancingScheme set to + EXTERNAL. Not supported when the URL map is + bound to target gRPC proxy that has + validateForProxyless field set to true. + + This field is a member of `oneof`_ ``_header_action``. + host_rules (Sequence[google.cloud.compute_v1.types.HostRule]): + The list of HostRules to use against the URL. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#urlMaps + for url maps. + + This field is a member of `oneof`_ ``_kind``. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. 
+ path_matchers (Sequence[google.cloud.compute_v1.types.PathMatcher]): + The list of named PathMatchers to use against + the URL. + region (str): + [Output Only] URL of the region where the regional URL map + resides. This field is not applicable to global URL maps. + You must specify this field as part of the HTTP request URL. + It is not settable as a field in the request body. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + tests (Sequence[google.cloud.compute_v1.types.UrlMapTest]): + The list of expected URL mapping tests. + Request to update this UrlMap will succeed only + if all of the test cases pass. You can specify a + maximum of 100 tests per UrlMap. Not supported + when the URL map is bound to target gRPC proxy + that has validateForProxyless field set to true. + """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + default_route_action = proto.Field( + proto.MESSAGE, + number=378919466, + optional=True, + message='HttpRouteAction', + ) + default_service = proto.Field( + proto.STRING, + number=370242231, + optional=True, + ) + default_url_redirect = proto.Field( + proto.MESSAGE, + number=359503338, + optional=True, + message='HttpRedirectAction', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + fingerprint = proto.Field( + proto.STRING, + number=234678500, + optional=True, + ) + header_action = proto.Field( + proto.MESSAGE, + number=328077352, + optional=True, + message='HttpHeaderAction', + ) + host_rules = proto.RepeatedField( + proto.MESSAGE, + number=311804832, + message='HostRule', + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + path_matchers = 
proto.RepeatedField( + proto.MESSAGE, + number=271664219, + message='PathMatcher', + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + tests = proto.RepeatedField( + proto.MESSAGE, + number=110251553, + message='UrlMapTest', + ) + + +class UrlMapList(proto.Message): + r"""Contains a list of UrlMap resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.UrlMap]): + A list of UrlMap resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. 
+ """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='UrlMap', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class UrlMapReference(proto.Message): + r""" + + Attributes: + url_map (str): + + This field is a member of `oneof`_ ``_url_map``. + """ + + url_map = proto.Field( + proto.STRING, + number=367020684, + optional=True, + ) + + +class UrlMapTest(proto.Message): + r"""Message for the expected URL mappings. + + Attributes: + description (str): + Description of this test case. + + This field is a member of `oneof`_ ``_description``. + expected_output_url (str): + The expected output URL evaluated by load balancer + containing the scheme, host, path and query parameters. For + rules that forward requests to backends, the test passes + only when expectedOutputUrl matches the request forwarded by + load balancer to backends. For rules with urlRewrite, the + test verifies that the forwarded request matches hostRewrite + and pathPrefixRewrite in the urlRewrite action. When service + is specified, expectedOutputUrl`s scheme is ignored. For + rules with urlRedirect, the test passes only if + expectedOutputUrl matches the URL in the load balancer's + redirect response. If urlRedirect specifies https_redirect, + the test passes only if the scheme in expectedOutputUrl is + also set to https. If urlRedirect specifies strip_query, the + test passes only if expectedOutputUrl does not contain any + query parameters. expectedOutputUrl is optional when service + is specified. 
+ + This field is a member of `oneof`_ ``_expected_output_url``. + expected_redirect_response_code (int): + For rules with urlRedirect, the test passes + only if expectedRedirectResponseCode matches the + HTTP status code in load balancer's redirect + response. expectedRedirectResponseCode cannot be + set when service is set. + + This field is a member of `oneof`_ ``_expected_redirect_response_code``. + headers (Sequence[google.cloud.compute_v1.types.UrlMapTestHeader]): + HTTP headers for this request. If headers + contains a host header, then host must also + match the header value. + host (str): + Host portion of the URL. If headers contains + a host header, then host must also match the + header value. + + This field is a member of `oneof`_ ``_host``. + path (str): + Path portion of the URL. + + This field is a member of `oneof`_ ``_path``. + service (str): + Expected BackendService or BackendBucket + resource the given URL should be mapped to. + service cannot be set if + expectedRedirectResponseCode is set. + + This field is a member of `oneof`_ ``_service``. + """ + + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + expected_output_url = proto.Field( + proto.STRING, + number=433967384, + optional=True, + ) + expected_redirect_response_code = proto.Field( + proto.INT32, + number=18888047, + optional=True, + ) + headers = proto.RepeatedField( + proto.MESSAGE, + number=258436998, + message='UrlMapTestHeader', + ) + host = proto.Field( + proto.STRING, + number=3208616, + optional=True, + ) + path = proto.Field( + proto.STRING, + number=3433509, + optional=True, + ) + service = proto.Field( + proto.STRING, + number=373540533, + optional=True, + ) + + +class UrlMapTestHeader(proto.Message): + r"""HTTP headers used in UrlMapTests. + + Attributes: + name (str): + Header name. + + This field is a member of `oneof`_ ``_name``. + value (str): + Header value. + + This field is a member of `oneof`_ ``_value``. 
+ """ + + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + value = proto.Field( + proto.STRING, + number=111972721, + optional=True, + ) + + +class UrlMapValidationResult(proto.Message): + r"""Message representing the validation result for a UrlMap. + + Attributes: + load_errors (Sequence[str]): + + load_succeeded (bool): + Whether the given UrlMap can be successfully + loaded. If false, 'loadErrors' indicates the + reasons. + + This field is a member of `oneof`_ ``_load_succeeded``. + test_failures (Sequence[google.cloud.compute_v1.types.TestFailure]): + + test_passed (bool): + If successfully loaded, this field indicates + whether the test passed. If false, + 'testFailures's indicate the reason of failure. + + This field is a member of `oneof`_ ``_test_passed``. + """ + + load_errors = proto.RepeatedField( + proto.STRING, + number=310147300, + ) + load_succeeded = proto.Field( + proto.BOOL, + number=128326216, + optional=True, + ) + test_failures = proto.RepeatedField( + proto.MESSAGE, + number=505934134, + message='TestFailure', + ) + test_passed = proto.Field( + proto.BOOL, + number=192708797, + optional=True, + ) + + +class UrlMapsAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.UrlMapsAggregatedList.ItemsEntry]): + A list of UrlMapsScopedList resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. 
+ + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='UrlMapsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class UrlMapsScopedList(proto.Message): + r""" + + Attributes: + url_maps (Sequence[google.cloud.compute_v1.types.UrlMap]): + A list of UrlMaps contained in this scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of backend services when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + url_maps = proto.RepeatedField( + proto.MESSAGE, + number=103352167, + message='UrlMap', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class UrlMapsValidateRequest(proto.Message): + r""" + + Attributes: + resource (google.cloud.compute_v1.types.UrlMap): + Content of the UrlMap to be validated. + + This field is a member of `oneof`_ ``_resource``. 
+ """ + + resource = proto.Field( + proto.MESSAGE, + number=195806222, + optional=True, + message='UrlMap', + ) + + +class UrlMapsValidateResponse(proto.Message): + r""" + + Attributes: + result (google.cloud.compute_v1.types.UrlMapValidationResult): + + This field is a member of `oneof`_ ``_result``. + """ + + result = proto.Field( + proto.MESSAGE, + number=139315229, + optional=True, + message='UrlMapValidationResult', + ) + + +class UrlRewrite(proto.Message): + r"""The spec for modifying the path before sending the request to + the matched backend service. + + Attributes: + host_rewrite (str): + Prior to forwarding the request to the + selected service, the request's host header is + replaced with contents of hostRewrite. The value + must be between 1 and 255 characters. + + This field is a member of `oneof`_ ``_host_rewrite``. + path_prefix_rewrite (str): + Prior to forwarding the request to the + selected backend service, the matching portion + of the request's path is replaced by + pathPrefixRewrite. The value must be between 1 + and 1024 characters. + + This field is a member of `oneof`_ ``_path_prefix_rewrite``. + """ + + host_rewrite = proto.Field( + proto.STRING, + number=159819253, + optional=True, + ) + path_prefix_rewrite = proto.Field( + proto.STRING, + number=41186361, + optional=True, + ) + + +class UsableSubnetwork(proto.Message): + r"""Subnetwork which the current user has compute.subnetworks.use + permission on. + + Attributes: + ip_cidr_range (str): + The range of internal addresses that are + owned by this subnetwork. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + network (str): + Network URL. + + This field is a member of `oneof`_ ``_network``. + secondary_ip_ranges (Sequence[google.cloud.compute_v1.types.UsableSubnetworkSecondaryRange]): + Secondary IP ranges. + subnetwork (str): + Subnetwork URL. + + This field is a member of `oneof`_ ``_subnetwork``. 
+ """ + + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + secondary_ip_ranges = proto.RepeatedField( + proto.MESSAGE, + number=136658915, + message='UsableSubnetworkSecondaryRange', + ) + subnetwork = proto.Field( + proto.STRING, + number=307827694, + optional=True, + ) + + +class UsableSubnetworkSecondaryRange(proto.Message): + r"""Secondary IP range of a usable subnetwork. + + Attributes: + ip_cidr_range (str): + The range of IP addresses belonging to this + subnetwork secondary range. + + This field is a member of `oneof`_ ``_ip_cidr_range``. + range_name (str): + The name associated with this subnetwork + secondary range, used when adding an alias IP + range to a VM instance. The name must be 1-63 + characters long, and comply with RFC1035. The + name must be unique within the subnetwork. + + This field is a member of `oneof`_ ``_range_name``. + """ + + ip_cidr_range = proto.Field( + proto.STRING, + number=98117322, + optional=True, + ) + range_name = proto.Field( + proto.STRING, + number=332216397, + optional=True, + ) + + +class UsableSubnetworksAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.UsableSubnetwork]): + [Output] A list of usable subnetwork URLs. + kind (str): + [Output Only] Type of resource. Always + compute#usableSubnetworksAggregatedList for aggregated lists + of usable subnetworks. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. In special cases + listUsable may return 0 subnetworks and nextPageToken which + still should be used to get the next page of results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='UsableSubnetwork', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class UsageExportLocation(proto.Message): + r"""The location in Cloud Storage and naming method of the daily usage + report. Contains bucket_name and report_name prefix. + + Attributes: + bucket_name (str): + The name of an existing bucket in Cloud + Storage where the usage report object is stored. + The Google Service Account is granted write + access to this bucket. This can either be the + bucket name by itself, such as example-bucket, + or the bucket name with gs:// or + https://storage.googleapis.com/ in front of it, + such as gs://example-bucket. + + This field is a member of `oneof`_ ``_bucket_name``. + report_name_prefix (str): + An optional prefix for the name of the usage report object + stored in bucketName. If not supplied, defaults to + usage_gce. 
The report is stored as a CSV file named + report_name_prefix_gce_YYYYMMDD.csv where YYYYMMDD is the + day of the usage according to Pacific Time. If you supply a + prefix, it should conform to Cloud Storage object naming + conventions. + + This field is a member of `oneof`_ ``_report_name_prefix``. + """ + + bucket_name = proto.Field( + proto.STRING, + number=283610048, + optional=True, + ) + report_name_prefix = proto.Field( + proto.STRING, + number=320198715, + optional=True, + ) + + +class ValidateRegionUrlMapRequest(proto.Message): + r"""A request message for RegionUrlMaps.Validate. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + region (str): + Name of the region scoping this request. + region_url_maps_validate_request_resource (google.cloud.compute_v1.types.RegionUrlMapsValidateRequest): + The body resource for this request + url_map (str): + Name of the UrlMap resource to be validated + as. + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + region_url_maps_validate_request_resource = proto.Field( + proto.MESSAGE, + number=56632858, + message='RegionUrlMapsValidateRequest', + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + + +class ValidateUrlMapRequest(proto.Message): + r"""A request message for UrlMaps.Validate. See the method + description for details. + + Attributes: + project (str): + Project ID for this request. + url_map (str): + Name of the UrlMap resource to be validated + as. 
+ url_maps_validate_request_resource (google.cloud.compute_v1.types.UrlMapsValidateRequest): + The body resource for this request + """ + + project = proto.Field( + proto.STRING, + number=227560217, + ) + url_map = proto.Field( + proto.STRING, + number=367020684, + ) + url_maps_validate_request_resource = proto.Field( + proto.MESSAGE, + number=395913455, + message='UrlMapsValidateRequest', + ) + + +class VmEndpointNatMappings(proto.Message): + r"""Contain information of Nat mapping for a VM endpoint (i.e., + NIC). + + Attributes: + instance_name (str): + Name of the VM instance which the endpoint + belongs to + + This field is a member of `oneof`_ ``_instance_name``. + interface_nat_mappings (Sequence[google.cloud.compute_v1.types.VmEndpointNatMappingsInterfaceNatMappings]): + + """ + + instance_name = proto.Field( + proto.STRING, + number=227947509, + optional=True, + ) + interface_nat_mappings = proto.RepeatedField( + proto.MESSAGE, + number=256196617, + message='VmEndpointNatMappingsInterfaceNatMappings', + ) + + +class VmEndpointNatMappingsInterfaceNatMappings(proto.Message): + r"""Contain information of Nat mapping for an interface of this + endpoint. + + Attributes: + drain_nat_ip_port_ranges (Sequence[str]): + List of all drain IP:port-range mappings assigned to this + interface. These ranges are inclusive, that is, both the + first and the last ports can be used for NAT. Example: + ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + nat_ip_port_ranges (Sequence[str]): + A list of all IP:port-range mappings assigned to this + interface. These ranges are inclusive, that is, both the + first and the last ports can be used for NAT. Example: + ["2.2.2.2:12345-12355", "1.1.1.1:2234-2234"]. + num_total_drain_nat_ports (int): + Total number of drain ports across all NAT IPs allocated to + this interface. It equals to the aggregated port number in + the field drain_nat_ip_port_ranges. + + This field is a member of `oneof`_ ``_num_total_drain_nat_ports``. 
+ num_total_nat_ports (int): + Total number of ports across all NAT IPs allocated to this + interface. It equals to the aggregated port number in the + field nat_ip_port_ranges. + + This field is a member of `oneof`_ ``_num_total_nat_ports``. + source_alias_ip_range (str): + Alias IP range for this interface endpoint. + It will be a private (RFC 1918) IP range. + Examples: "10.33.4.55/32", or "192.168.5.0/24". + + This field is a member of `oneof`_ ``_source_alias_ip_range``. + source_virtual_ip (str): + Primary IP of the VM for this NIC. + + This field is a member of `oneof`_ ``_source_virtual_ip``. + """ + + drain_nat_ip_port_ranges = proto.RepeatedField( + proto.STRING, + number=395440577, + ) + nat_ip_port_ranges = proto.RepeatedField( + proto.STRING, + number=531830810, + ) + num_total_drain_nat_ports = proto.Field( + proto.INT32, + number=335532793, + optional=True, + ) + num_total_nat_ports = proto.Field( + proto.INT32, + number=299904384, + optional=True, + ) + source_alias_ip_range = proto.Field( + proto.STRING, + number=440340952, + optional=True, + ) + source_virtual_ip = proto.Field( + proto.STRING, + number=149836159, + optional=True, + ) + + +class VmEndpointNatMappingsList(proto.Message): + r"""Contains a list of VmEndpointNatMappings. + + Attributes: + id (str): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. Always + compute#vmEndpointNatMappingsList for lists of Nat mappings + of VM endpoints. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + result (Sequence[google.cloud.compute_v1.types.VmEndpointNatMappings]): + [Output Only] A list of Nat mapping information of VM + endpoints. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + result = proto.RepeatedField( + proto.MESSAGE, + number=139315229, + message='VmEndpointNatMappings', + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class VpnGateway(proto.Message): + r"""Represents a HA VPN gateway. HA VPN is a high-availability + (HA) Cloud VPN solution that lets you securely connect your on- + premises network to your Google Cloud Virtual Private Cloud + network through an IPsec VPN connection in a single region. For + more information about Cloud HA VPN solutions, see Cloud VPN + topologies . + + Attributes: + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + description (str): + An optional description of this resource. + Provide this property when you create the + resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. 
This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of resource. Always compute#vpnGateway + for VPN gateways. + + This field is a member of `oneof`_ ``_kind``. + label_fingerprint (str): + A fingerprint for the labels being applied to + this VpnGateway, which is essentially a hash of + the labels set used for optimistic locking. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash in order to update or + change labels, otherwise the request will fail + with error 412 conditionNotMet. To see the + latest fingerprint, make a get() request to + retrieve an VpnGateway. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.VpnGateway.LabelsEntry]): + Labels for this resource. These can only be + added or modified by the setLabels method. Each + label key/value pair must comply with RFC1035. + Label values may be empty. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + network (str): + URL of the network to which this VPN gateway + is attached. Provided by the client when the VPN + gateway is created. + + This field is a member of `oneof`_ ``_network``. + region (str): + [Output Only] URL of the region where the VPN gateway + resides. + + This field is a member of `oneof`_ ``_region``. 
+ self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + vpn_interfaces (Sequence[google.cloud.compute_v1.types.VpnGatewayVpnGatewayInterface]): + The list of VPN interfaces associated with + this VPN gateway. + """ + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + network = proto.Field( + proto.STRING, + number=232872494, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + vpn_interfaces = proto.RepeatedField( + proto.MESSAGE, + number=91842181, + message='VpnGatewayVpnGatewayInterface', + ) + + +class VpnGatewayAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.VpnGatewayAggregatedList.ItemsEntry]): + A list of VpnGateway resources. + kind (str): + [Output Only] Type of resource. Always compute#vpnGateway + for VPN gateways. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='VpnGatewaysScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class VpnGatewayList(proto.Message): + r"""Contains a list of VpnGateway resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.VpnGateway]): + A list of VpnGateway resources. + kind (str): + [Output Only] Type of resource. Always compute#vpnGateway + for VPN gateways. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='VpnGateway', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class VpnGatewayStatus(proto.Message): + r""" + + Attributes: + vpn_connections (Sequence[google.cloud.compute_v1.types.VpnGatewayStatusVpnConnection]): + List of VPN connection for this VpnGateway. + """ + + vpn_connections = proto.RepeatedField( + proto.MESSAGE, + number=439334538, + message='VpnGatewayStatusVpnConnection', + ) + + +class VpnGatewayStatusHighAvailabilityRequirementState(proto.Message): + r"""Describes the high availability requirement state for the VPN + connection between this Cloud VPN gateway and a peer gateway. + + Attributes: + state (str): + Indicates the high availability requirement state for the + VPN connection. Valid values are CONNECTION_REDUNDANCY_MET, + CONNECTION_REDUNDANCY_NOT_MET. Check the State enum for the + list of possible values. + + This field is a member of `oneof`_ ``_state``. 
+
+        unsatisfied_reason (str):
+            Indicates the reason why the VPN connection does not meet
+            the high availability redundancy criteria/requirement. Valid
+            value is INCOMPLETE_TUNNELS_COVERAGE. Check the
+            UnsatisfiedReason enum for the list of possible values.
+
+            This field is a member of `oneof`_ ``_unsatisfied_reason``.
+    """
+    class State(proto.Enum):
+        r"""Indicates the high availability requirement state for the VPN
+        connection. Valid values are CONNECTION_REDUNDANCY_MET,
+        CONNECTION_REDUNDANCY_NOT_MET.
+        """
+        UNDEFINED_STATE = 0
+        CONNECTION_REDUNDANCY_MET = 505242907
+        CONNECTION_REDUNDANCY_NOT_MET = 511863311
+
+    class UnsatisfiedReason(proto.Enum):
+        r"""Indicates the reason why the VPN connection does not meet the high
+        availability redundancy criteria/requirement. Valid value is
+        INCOMPLETE_TUNNELS_COVERAGE.
+        """
+        UNDEFINED_UNSATISFIED_REASON = 0
+        INCOMPLETE_TUNNELS_COVERAGE = 55917437
+
+    state = proto.Field(
+        proto.STRING,
+        number=109757585,
+        optional=True,
+    )
+    unsatisfied_reason = proto.Field(
+        proto.STRING,
+        number=55016330,
+        optional=True,
+    )
+
+
+class VpnGatewayStatusTunnel(proto.Message):
+    r"""Contains some information about a VPN tunnel.
+
+    Attributes:
+        local_gateway_interface (int):
+            The VPN gateway interface this VPN tunnel is
+            associated with.
+
+            This field is a member of `oneof`_ ``_local_gateway_interface``.
+        peer_gateway_interface (int):
+            The peer gateway interface this VPN tunnel is
+            connected to, the peer gateway could either be
+            an external VPN gateway or GCP VPN gateway.
+
+            This field is a member of `oneof`_ ``_peer_gateway_interface``.
+        tunnel_url (str):
+            URL reference to the VPN tunnel.
+
+            This field is a member of `oneof`_ ``_tunnel_url``.
+    """
+
+    local_gateway_interface = proto.Field(
+        proto.UINT32,
+        number=158764330,
+        optional=True,
+    )
+    peer_gateway_interface = proto.Field(
+        proto.UINT32,
+        number=214380385,
+        optional=True,
+    )
+    tunnel_url = proto.Field(
+        proto.STRING,
+        number=78975256,
+        optional=True,
+    )
+
+
+class VpnGatewayStatusVpnConnection(proto.Message):
+    r"""A VPN connection contains all VPN tunnels connected from this
+    VpnGateway to the same peer gateway. The peer gateway could
+    either be an external VPN gateway or GCP VPN gateway.
+
+    Attributes:
+        peer_external_gateway (str):
+            URL reference to the peer external VPN gateways to which the
+            VPN tunnels in this VPN connection are connected. This field
+            is mutually exclusive with peer_gcp_gateway.
+
+            This field is a member of `oneof`_ ``_peer_external_gateway``.
+        peer_gcp_gateway (str):
+            URL reference to the peer side VPN gateways to which the VPN
+            tunnels in this VPN connection are connected. This field is
+            mutually exclusive with peer_external_gateway.
+
+            This field is a member of `oneof`_ ``_peer_gcp_gateway``.
+        state (google.cloud.compute_v1.types.VpnGatewayStatusHighAvailabilityRequirementState):
+            HighAvailabilityRequirementState for the VPN
+            connection.
+
+            This field is a member of `oneof`_ ``_state``.
+        tunnels (Sequence[google.cloud.compute_v1.types.VpnGatewayStatusTunnel]):
+            List of VPN tunnels that are in this VPN
+            connection.
+    """
+
+    peer_external_gateway = proto.Field(
+        proto.STRING,
+        number=384956173,
+        optional=True,
+    )
+    peer_gcp_gateway = proto.Field(
+        proto.STRING,
+        number=281867452,
+        optional=True,
+    )
+    state = proto.Field(
+        proto.MESSAGE,
+        number=109757585,
+        optional=True,
+        message='VpnGatewayStatusHighAvailabilityRequirementState',
+    )
+    tunnels = proto.RepeatedField(
+        proto.MESSAGE,
+        number=104561931,
+        message='VpnGatewayStatusTunnel',
+    )
+
+
+class VpnGatewayVpnGatewayInterface(proto.Message):
+    r"""A VPN gateway interface.
+ + Attributes: + id (int): + [Output Only] Numeric identifier for this VPN interface + associated with the VPN gateway. + + This field is a member of `oneof`_ ``_id``. + interconnect_attachment (str): + URL of the VLAN attachment + (interconnectAttachment) resource for this VPN + gateway interface. When the value of this field + is present, the VPN gateway is used for IPsec- + encrypted Cloud Interconnect; all egress or + ingress traffic for this VPN gateway interface + goes through the specified VLAN attachment + resource. Not currently available publicly. + + This field is a member of `oneof`_ ``_interconnect_attachment``. + ip_address (str): + [Output Only] IP address for this VPN interface associated + with the VPN gateway. The IP address could be either a + regional external IP address or a regional internal IP + address. The two IP addresses for a VPN gateway must be all + regional external or regional internal IP addresses. There + cannot be a mix of regional external IP addresses and + regional internal IP addresses. For IPsec-encrypted Cloud + Interconnect, the IP addresses for both interfaces could + either be regional internal IP addresses or regional + external IP addresses. For regular (non IPsec-encrypted + Cloud Interconnect) HA VPN tunnels, the IP address must be a + regional external IP address. + + This field is a member of `oneof`_ ``_ip_address``. + """ + + id = proto.Field( + proto.UINT32, + number=3355, + optional=True, + ) + interconnect_attachment = proto.Field( + proto.STRING, + number=308135284, + optional=True, + ) + ip_address = proto.Field( + proto.STRING, + number=406272220, + optional=True, + ) + + +class VpnGatewaysGetStatusResponse(proto.Message): + r""" + + Attributes: + result (google.cloud.compute_v1.types.VpnGatewayStatus): + + This field is a member of `oneof`_ ``_result``. 
+    """
+
+    result = proto.Field(
+        proto.MESSAGE,
+        number=139315229,
+        optional=True,
+        message='VpnGatewayStatus',
+    )
+
+
+class VpnGatewaysScopedList(proto.Message):
+    r"""
+
+    Attributes:
+        vpn_gateways (Sequence[google.cloud.compute_v1.types.VpnGateway]):
+            [Output Only] A list of VPN gateways contained in this
+            scope.
+        warning (google.cloud.compute_v1.types.Warning):
+            [Output Only] Informational warning which replaces the list
+            of addresses when the list is empty.
+
+            This field is a member of `oneof`_ ``_warning``.
+    """
+
+    vpn_gateways = proto.RepeatedField(
+        proto.MESSAGE,
+        number=259177882,
+        message='VpnGateway',
+    )
+    warning = proto.Field(
+        proto.MESSAGE,
+        number=50704284,
+        optional=True,
+        message='Warning',
+    )
+
+
+class VpnTunnel(proto.Message):
+    r"""Represents a Cloud VPN Tunnel resource. For more information
+    about VPN, read the Cloud VPN Overview.
+
+    Attributes:
+        creation_timestamp (str):
+            [Output Only] Creation timestamp in RFC3339 text format.
+
+            This field is a member of `oneof`_ ``_creation_timestamp``.
+        description (str):
+            An optional description of this resource.
+            Provide this property when you create the
+            resource.
+
+            This field is a member of `oneof`_ ``_description``.
+        detailed_status (str):
+            [Output Only] Detailed status message for the VPN tunnel.
+
+            This field is a member of `oneof`_ ``_detailed_status``.
+        id (int):
+            [Output Only] The unique identifier for the resource. This
+            identifier is defined by the server.
+
+            This field is a member of `oneof`_ ``_id``.
+        ike_version (int):
+            IKE protocol version to use when establishing
+            the VPN tunnel with the peer VPN gateway.
+            Acceptable IKE versions are 1 or 2. The default
+            version is 2.
+
+            This field is a member of `oneof`_ ``_ike_version``.
+        kind (str):
+            [Output Only] Type of resource. Always compute#vpnTunnel for
+            VPN tunnels.
+
+            This field is a member of `oneof`_ ``_kind``.
+ local_traffic_selector (Sequence[str]): + Local traffic selector to use when + establishing the VPN tunnel with the peer VPN + gateway. The value should be a CIDR formatted + string, for example: 192.168.0.0/16. The ranges + must be disjoint. Only IPv4 is supported. + name (str): + Name of the resource. Provided by the client when the + resource is created. The name must be 1-63 characters long, + and comply with RFC1035. Specifically, the name must be 1-63 + characters long and match the regular expression + ``[a-z]([-a-z0-9]*[a-z0-9])?`` which means the first + character must be a lowercase letter, and all following + characters must be a dash, lowercase letter, or digit, + except the last character, which cannot be a dash. + + This field is a member of `oneof`_ ``_name``. + peer_external_gateway (str): + URL of the peer side external VPN gateway to + which this VPN tunnel is connected. Provided by + the client when the VPN tunnel is created. This + field is exclusive with the field + peerGcpGateway. + + This field is a member of `oneof`_ ``_peer_external_gateway``. + peer_external_gateway_interface (int): + The interface ID of the external VPN gateway + to which this VPN tunnel is connected. Provided + by the client when the VPN tunnel is created. + + This field is a member of `oneof`_ ``_peer_external_gateway_interface``. + peer_gcp_gateway (str): + URL of the peer side HA GCP VPN gateway to + which this VPN tunnel is connected. Provided by + the client when the VPN tunnel is created. This + field can be used when creating highly available + VPN from VPC network to VPC network, the field + is exclusive with the field peerExternalGateway. + If provided, the VPN tunnel will automatically + use the same vpnGatewayInterface ID in the peer + GCP VPN gateway. + + This field is a member of `oneof`_ ``_peer_gcp_gateway``. + peer_ip (str): + IP address of the peer VPN gateway. Only IPv4 + is supported. + + This field is a member of `oneof`_ ``_peer_ip``. 
+ region (str): + [Output Only] URL of the region where the VPN tunnel + resides. You must specify this field as part of the HTTP + request URL. It is not settable as a field in the request + body. + + This field is a member of `oneof`_ ``_region``. + remote_traffic_selector (Sequence[str]): + Remote traffic selectors to use when + establishing the VPN tunnel with the peer VPN + gateway. The value should be a CIDR formatted + string, for example: 192.168.0.0/16. The ranges + should be disjoint. Only IPv4 is supported. + router (str): + URL of the router resource to be used for + dynamic routing. + + This field is a member of `oneof`_ ``_router``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + shared_secret (str): + Shared secret used to set the secure session + between the Cloud VPN gateway and the peer VPN + gateway. + + This field is a member of `oneof`_ ``_shared_secret``. + shared_secret_hash (str): + Hash of the shared secret. + + This field is a member of `oneof`_ ``_shared_secret_hash``. + status (str): + [Output Only] The status of the VPN tunnel, which can be one + of the following: - PROVISIONING: Resource is being + allocated for the VPN tunnel. - WAITING_FOR_FULL_CONFIG: + Waiting to receive all VPN-related configs from the user. + Network, TargetVpnGateway, VpnTunnel, ForwardingRule, and + Route resources are needed to setup the VPN tunnel. - + FIRST_HANDSHAKE: Successful first handshake with the peer + VPN. - ESTABLISHED: Secure session is successfully + established with the peer VPN. - NETWORK_ERROR: Deprecated, + replaced by NO_INCOMING_PACKETS - AUTHORIZATION_ERROR: Auth + error (for example, bad shared secret). - + NEGOTIATION_FAILURE: Handshake failed. - DEPROVISIONING: + Resources are being deallocated for the VPN tunnel. - + FAILED: Tunnel creation has failed and the tunnel is not + ready to be used. - NO_INCOMING_PACKETS: No incoming packets + from peer. 
- REJECTED: Tunnel configuration was rejected, + can be result of being denied access. - + ALLOCATING_RESOURCES: Cloud VPN is in the process of + allocating all required resources. - STOPPED: Tunnel is + stopped due to its Forwarding Rules being deleted for + Classic VPN tunnels or the project is in frozen state. - + PEER_IDENTITY_MISMATCH: Peer identity does not match peer + IP, probably behind NAT. - TS_NARROWING_NOT_ALLOWED: Traffic + selector narrowing not allowed for an HA-VPN tunnel. Check + the Status enum for the list of possible values. + + This field is a member of `oneof`_ ``_status``. + target_vpn_gateway (str): + URL of the Target VPN gateway with which this + VPN tunnel is associated. Provided by the client + when the VPN tunnel is created. + + This field is a member of `oneof`_ ``_target_vpn_gateway``. + vpn_gateway (str): + URL of the VPN gateway with which this VPN tunnel is + associated. Provided by the client when the VPN tunnel is + created. This must be used (instead of target_vpn_gateway) + if a High Availability VPN gateway resource is created. + + This field is a member of `oneof`_ ``_vpn_gateway``. + vpn_gateway_interface (int): + The interface ID of the VPN gateway with + which this VPN tunnel is associated. + + This field is a member of `oneof`_ ``_vpn_gateway_interface``. + """ + class Status(proto.Enum): + r"""[Output Only] The status of the VPN tunnel, which can be one of the + following: - PROVISIONING: Resource is being allocated for the VPN + tunnel. - WAITING_FOR_FULL_CONFIG: Waiting to receive all + VPN-related configs from the user. Network, TargetVpnGateway, + VpnTunnel, ForwardingRule, and Route resources are needed to setup + the VPN tunnel. - FIRST_HANDSHAKE: Successful first handshake with + the peer VPN. - ESTABLISHED: Secure session is successfully + established with the peer VPN. - NETWORK_ERROR: Deprecated, replaced + by NO_INCOMING_PACKETS - AUTHORIZATION_ERROR: Auth error (for + example, bad shared secret). 
- NEGOTIATION_FAILURE: Handshake + failed. - DEPROVISIONING: Resources are being deallocated for the + VPN tunnel. - FAILED: Tunnel creation has failed and the tunnel is + not ready to be used. - NO_INCOMING_PACKETS: No incoming packets + from peer. - REJECTED: Tunnel configuration was rejected, can be + result of being denied access. - ALLOCATING_RESOURCES: Cloud VPN is + in the process of allocating all required resources. - STOPPED: + Tunnel is stopped due to its Forwarding Rules being deleted for + Classic VPN tunnels or the project is in frozen state. - + PEER_IDENTITY_MISMATCH: Peer identity does not match peer IP, + probably behind NAT. - TS_NARROWING_NOT_ALLOWED: Traffic selector + narrowing not allowed for an HA-VPN tunnel. + """ + UNDEFINED_STATUS = 0 + ALLOCATING_RESOURCES = 320922816 + AUTHORIZATION_ERROR = 23580290 + DEPROVISIONING = 428935662 + ESTABLISHED = 88852344 + FAILED = 455706685 + FIRST_HANDSHAKE = 191393000 + NEGOTIATION_FAILURE = 360325868 + NETWORK_ERROR = 193912951 + NO_INCOMING_PACKETS = 119983216 + PROVISIONING = 290896621 + REJECTED = 174130302 + STOPPED = 444276141 + WAITING_FOR_FULL_CONFIG = 41640522 + + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + detailed_status = proto.Field( + proto.STRING, + number=333501025, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + ike_version = proto.Field( + proto.INT32, + number=218376220, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + local_traffic_selector = proto.RepeatedField( + proto.STRING, + number=317314613, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + peer_external_gateway = proto.Field( + proto.STRING, + number=384956173, + optional=True, + ) + peer_external_gateway_interface = proto.Field( + proto.INT32, + number=452768391, 
+ optional=True, + ) + peer_gcp_gateway = proto.Field( + proto.STRING, + number=281867452, + optional=True, + ) + peer_ip = proto.Field( + proto.STRING, + number=383249700, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + remote_traffic_selector = proto.RepeatedField( + proto.STRING, + number=358887098, + ) + router = proto.Field( + proto.STRING, + number=148608841, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + shared_secret = proto.Field( + proto.STRING, + number=381932490, + optional=True, + ) + shared_secret_hash = proto.Field( + proto.STRING, + number=398881891, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + target_vpn_gateway = proto.Field( + proto.STRING, + number=532512843, + optional=True, + ) + vpn_gateway = proto.Field( + proto.STRING, + number=406684153, + optional=True, + ) + vpn_gateway_interface = proto.Field( + proto.INT32, + number=95979123, + optional=True, + ) + + +class VpnTunnelAggregatedList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.VpnTunnelAggregatedList.ItemsEntry]): + A list of VpnTunnelsScopedList resources. + kind (str): + [Output Only] Type of resource. Always compute#vpnTunnel for + VPN tunnels. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + unreachables (Sequence[str]): + [Output Only] Unreachable resources. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=100526016, + message='VpnTunnelsScopedList', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + unreachables = proto.RepeatedField( + proto.STRING, + number=243372063, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class VpnTunnelList(proto.Message): + r"""Contains a list of VpnTunnel resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.VpnTunnel]): + A list of VpnTunnel resources. + kind (str): + [Output Only] Type of resource. Always compute#vpnTunnel for + VPN tunnels. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. 
+ self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='VpnTunnel', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class VpnTunnelsScopedList(proto.Message): + r""" + + Attributes: + vpn_tunnels (Sequence[google.cloud.compute_v1.types.VpnTunnel]): + A list of VPN tunnels contained in this + scope. + warning (google.cloud.compute_v1.types.Warning): + Informational warning which replaces the list + of addresses when the list is empty. + + This field is a member of `oneof`_ ``_warning``. + """ + + vpn_tunnels = proto.RepeatedField( + proto.MESSAGE, + number=163494080, + message='VpnTunnel', + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class WafExpressionSet(proto.Message): + r""" + + Attributes: + aliases (Sequence[str]): + A list of alternate IDs. The format should + be: - E.g. XSS-stable Generic suffix like + "stable" is particularly useful if a policy + likes to avail newer set of expressions without + having to change the policy. A given alias name + can't be used for more than one entity set. + expressions (Sequence[google.cloud.compute_v1.types.WafExpressionSetExpression]): + List of available expressions. + id (str): + Google specified expression set ID. 
The + format should be: - E.g. XSS-20170329 required + + This field is a member of `oneof`_ ``_id``. + """ + + aliases = proto.RepeatedField( + proto.STRING, + number=159207166, + ) + expressions = proto.RepeatedField( + proto.MESSAGE, + number=175554779, + message='WafExpressionSetExpression', + ) + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + + +class WafExpressionSetExpression(proto.Message): + r""" + + Attributes: + id (str): + Expression ID should uniquely identify the + origin of the expression. E.g. owasp- + crs-v020901-id973337 identifies Owasp core rule + set version 2.9.1 rule id 973337. The ID could + be used to determine the individual attack + definition that has been detected. It could also + be used to exclude it from the policy in case of + false positive. required + + This field is a member of `oneof`_ ``_id``. + """ + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + + +class WaitGlobalOperationRequest(proto.Message): + r"""A request message for GlobalOperations.Wait. See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to return. + project (str): + Project ID for this request. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + + +class WaitRegionOperationRequest(proto.Message): + r"""A request message for RegionOperations.Wait. See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to return. + project (str): + Project ID for this request. + region (str): + Name of the region for this request. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + region = proto.Field( + proto.STRING, + number=138946292, + ) + + +class WaitZoneOperationRequest(proto.Message): + r"""A request message for ZoneOperations.Wait. 
See the method + description for details. + + Attributes: + operation (str): + Name of the Operations resource to return. + project (str): + Project ID for this request. + zone (str): + Name of the zone for this request. + """ + + operation = proto.Field( + proto.STRING, + number=52090215, + ) + project = proto.Field( + proto.STRING, + number=227560217, + ) + zone = proto.Field( + proto.STRING, + number=3744684, + ) + + +class Warning(proto.Message): + r"""[Output Only] Informational warning message. + + Attributes: + code (str): + [Output Only] A warning code, if applicable. For example, + Compute Engine returns NO_RESULTS_ON_PAGE if there are no + results in the response. Check the Code enum for the list of + possible values. + + This field is a member of `oneof`_ ``_code``. + data (Sequence[google.cloud.compute_v1.types.Data]): + [Output Only] Metadata about this warning in key: value + format. For example: "data": [ { "key": "scope", "value": + "zones/us-east1-d" } + message (str): + [Output Only] A human-readable description of the warning + code. + + This field is a member of `oneof`_ ``_message``. + """ + class Code(proto.Enum): + r"""[Output Only] A warning code, if applicable. For example, Compute + Engine returns NO_RESULTS_ON_PAGE if there are no results in the + response. 
+ """ + UNDEFINED_CODE = 0 + CLEANUP_FAILED = 150308440 + DEPRECATED_RESOURCE_USED = 391835586 + DEPRECATED_TYPE_USED = 346526230 + DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 369442967 + EXPERIMENTAL_TYPE_USED = 451954443 + EXTERNAL_API_WARNING = 175546307 + FIELD_VALUE_OVERRIDEN = 329669423 + INJECTED_KERNELS_DEPRECATED = 417377419 + LARGE_DEPLOYMENT_WARNING = 481440678 + MISSING_TYPE_DEPENDENCY = 344505463 + NEXT_HOP_ADDRESS_NOT_ASSIGNED = 324964999 + NEXT_HOP_CANNOT_IP_FORWARD = 383382887 + NEXT_HOP_INSTANCE_NOT_FOUND = 464250446 + NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 243758146 + NEXT_HOP_NOT_RUNNING = 417081265 + NOT_CRITICAL_ERROR = 105763924 + NO_RESULTS_ON_PAGE = 30036744 + PARTIAL_SUCCESS = 39966469 + REQUIRED_TOS_AGREEMENT = 3745539 + RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING = 496728641 + RESOURCE_NOT_DELETED = 168598460 + SCHEMA_VALIDATION_IGNORED = 275245642 + SINGLE_INSTANCE_PROPERTY_TEMPLATE = 268305617 + UNDECLARED_PROPERTIES = 390513439 + UNREACHABLE = 13328052 + + code = proto.Field( + proto.STRING, + number=3059181, + optional=True, + ) + data = proto.RepeatedField( + proto.MESSAGE, + number=3076010, + message='Data', + ) + message = proto.Field( + proto.STRING, + number=418054151, + optional=True, + ) + + +class Warnings(proto.Message): + r""" + + Attributes: + code (str): + [Output Only] A warning code, if applicable. For example, + Compute Engine returns NO_RESULTS_ON_PAGE if there are no + results in the response. Check the Code enum for the list of + possible values. + + This field is a member of `oneof`_ ``_code``. + data (Sequence[google.cloud.compute_v1.types.Data]): + [Output Only] Metadata about this warning in key: value + format. For example: "data": [ { "key": "scope", "value": + "zones/us-east1-d" } + message (str): + [Output Only] A human-readable description of the warning + code. + + This field is a member of `oneof`_ ``_message``. + """ + class Code(proto.Enum): + r"""[Output Only] A warning code, if applicable. 
For example, Compute + Engine returns NO_RESULTS_ON_PAGE if there are no results in the + response. + """ + UNDEFINED_CODE = 0 + CLEANUP_FAILED = 150308440 + DEPRECATED_RESOURCE_USED = 391835586 + DEPRECATED_TYPE_USED = 346526230 + DISK_SIZE_LARGER_THAN_IMAGE_SIZE = 369442967 + EXPERIMENTAL_TYPE_USED = 451954443 + EXTERNAL_API_WARNING = 175546307 + FIELD_VALUE_OVERRIDEN = 329669423 + INJECTED_KERNELS_DEPRECATED = 417377419 + LARGE_DEPLOYMENT_WARNING = 481440678 + MISSING_TYPE_DEPENDENCY = 344505463 + NEXT_HOP_ADDRESS_NOT_ASSIGNED = 324964999 + NEXT_HOP_CANNOT_IP_FORWARD = 383382887 + NEXT_HOP_INSTANCE_NOT_FOUND = 464250446 + NEXT_HOP_INSTANCE_NOT_ON_NETWORK = 243758146 + NEXT_HOP_NOT_RUNNING = 417081265 + NOT_CRITICAL_ERROR = 105763924 + NO_RESULTS_ON_PAGE = 30036744 + PARTIAL_SUCCESS = 39966469 + REQUIRED_TOS_AGREEMENT = 3745539 + RESOURCE_IN_USE_BY_OTHER_RESOURCE_WARNING = 496728641 + RESOURCE_NOT_DELETED = 168598460 + SCHEMA_VALIDATION_IGNORED = 275245642 + SINGLE_INSTANCE_PROPERTY_TEMPLATE = 268305617 + UNDECLARED_PROPERTIES = 390513439 + UNREACHABLE = 13328052 + + code = proto.Field( + proto.STRING, + number=3059181, + optional=True, + ) + data = proto.RepeatedField( + proto.MESSAGE, + number=3076010, + message='Data', + ) + message = proto.Field( + proto.STRING, + number=418054151, + optional=True, + ) + + +class WeightedBackendService(proto.Message): + r"""In contrast to a single BackendService in HttpRouteAction to + which all matching traffic is directed to, + WeightedBackendService allows traffic to be split across + multiple BackendServices. The volume of traffic for each + BackendService is proportional to the weight specified in each + WeightedBackendService + + Attributes: + backend_service (str): + The full or partial URL to the default + BackendService resource. Before forwarding the + request to backendService, the loadbalancer + applies any relevant headerActions specified as + part of this backendServiceWeight. 
+ + This field is a member of `oneof`_ ``_backend_service``. + header_action (google.cloud.compute_v1.types.HttpHeaderAction): + Specifies changes to request and response + headers that need to take effect for the + selected backendService. headerAction specified + here take effect before headerAction in the + enclosing HttpRouteRule, PathMatcher and UrlMap. + Note that headerAction is not supported for + Loadbalancers that have their + loadBalancingScheme set to EXTERNAL. Not + supported when the URL map is bound to target + gRPC proxy that has validateForProxyless field + set to true. + + This field is a member of `oneof`_ ``_header_action``. + weight (int): + Specifies the fraction of traffic sent to + backendService, computed as weight / (sum of all + weightedBackendService weights in routeAction) . + The selection of a backend service is determined + only for new traffic. Once a user's request has + been directed to a backendService, subsequent + requests will be sent to the same backendService + as determined by the BackendService's session + affinity policy. The value must be between 0 and + 1000 + + This field is a member of `oneof`_ ``_weight``. + """ + + backend_service = proto.Field( + proto.STRING, + number=306946058, + optional=True, + ) + header_action = proto.Field( + proto.MESSAGE, + number=328077352, + optional=True, + message='HttpHeaderAction', + ) + weight = proto.Field( + proto.UINT32, + number=282149496, + optional=True, + ) + + +class XpnHostList(proto.Message): + r""" + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Project]): + [Output Only] A list of shared VPC host project URLs. + kind (str): + [Output Only] Type of resource. Always compute#xpnHostList + for lists of shared VPC hosts. + + This field is a member of `oneof`_ ``_kind``. 
+ next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. + Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Project', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class XpnResourceId(proto.Message): + r"""Service resource (a.k.a service project) ID. + + Attributes: + id (str): + The ID of the service resource. In the case + of projects, this field supports project id + (e.g., my-project-123) and project number (e.g. + 12345678). + + This field is a member of `oneof`_ ``_id``. + type_ (str): + The type of the service resource. + Check the Type enum for the list of possible + values. + + This field is a member of `oneof`_ ``_type``. 
+ """ + class Type(proto.Enum): + r"""The type of the service resource.""" + UNDEFINED_TYPE = 0 + PROJECT = 408671993 + XPN_RESOURCE_TYPE_UNSPECIFIED = 151607034 + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + type_ = proto.Field( + proto.STRING, + number=3575610, + optional=True, + ) + + +class Zone(proto.Message): + r"""Represents a Zone resource. A zone is a deployment area. + These deployment areas are subsets of a region. For example the + zone us-east1-a is located in the us-east1 region. For more + information, read Regions and Zones. + + Attributes: + available_cpu_platforms (Sequence[str]): + [Output Only] Available cpu/platform selections for the + zone. + creation_timestamp (str): + [Output Only] Creation timestamp in RFC3339 text format. + + This field is a member of `oneof`_ ``_creation_timestamp``. + deprecated (google.cloud.compute_v1.types.DeprecationStatus): + [Output Only] The deprecation status associated with this + zone. + + This field is a member of `oneof`_ ``_deprecated``. + description (str): + [Output Only] Textual description of the resource. + + This field is a member of `oneof`_ ``_description``. + id (int): + [Output Only] The unique identifier for the resource. This + identifier is defined by the server. + + This field is a member of `oneof`_ ``_id``. + kind (str): + [Output Only] Type of the resource. Always compute#zone for + zones. + + This field is a member of `oneof`_ ``_kind``. + name (str): + [Output Only] Name of the resource. + + This field is a member of `oneof`_ ``_name``. + region (str): + [Output Only] Full URL reference to the region which hosts + the zone. + + This field is a member of `oneof`_ ``_region``. + self_link (str): + [Output Only] Server-defined URL for the resource. + + This field is a member of `oneof`_ ``_self_link``. + status (str): + [Output Only] Status of the zone, either UP or DOWN. Check + the Status enum for the list of possible values. 
+ + This field is a member of `oneof`_ ``_status``. + supports_pzs (bool): + [Output Only] Reserved for future use. + + This field is a member of `oneof`_ ``_supports_pzs``. + """ + class Status(proto.Enum): + r"""[Output Only] Status of the zone, either UP or DOWN.""" + UNDEFINED_STATUS = 0 + DOWN = 2104482 + UP = 2715 + + available_cpu_platforms = proto.RepeatedField( + proto.STRING, + number=175536531, + ) + creation_timestamp = proto.Field( + proto.STRING, + number=30525366, + optional=True, + ) + deprecated = proto.Field( + proto.MESSAGE, + number=515138995, + optional=True, + message='DeprecationStatus', + ) + description = proto.Field( + proto.STRING, + number=422937596, + optional=True, + ) + id = proto.Field( + proto.UINT64, + number=3355, + optional=True, + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + name = proto.Field( + proto.STRING, + number=3373707, + optional=True, + ) + region = proto.Field( + proto.STRING, + number=138946292, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + status = proto.Field( + proto.STRING, + number=181260274, + optional=True, + ) + supports_pzs = proto.Field( + proto.BOOL, + number=83983214, + optional=True, + ) + + +class ZoneList(proto.Message): + r"""Contains a list of zone resources. + + Attributes: + id (str): + [Output Only] Unique identifier for the resource; defined by + the server. + + This field is a member of `oneof`_ ``_id``. + items (Sequence[google.cloud.compute_v1.types.Zone]): + A list of Zone resources. + kind (str): + Type of resource. + + This field is a member of `oneof`_ ``_kind``. + next_page_token (str): + [Output Only] This token allows you to get the next page of + results for list requests. If the number of results is + larger than maxResults, use the nextPageToken as a value for + the query parameter pageToken in the next list request. 
+ Subsequent list requests will have their own nextPageToken + to continue paging through the results. + + This field is a member of `oneof`_ ``_next_page_token``. + self_link (str): + [Output Only] Server-defined URL for this resource. + + This field is a member of `oneof`_ ``_self_link``. + warning (google.cloud.compute_v1.types.Warning): + [Output Only] Informational warning message. + + This field is a member of `oneof`_ ``_warning``. + """ + + @property + def raw_page(self): + return self + + id = proto.Field( + proto.STRING, + number=3355, + optional=True, + ) + items = proto.RepeatedField( + proto.MESSAGE, + number=100526016, + message='Zone', + ) + kind = proto.Field( + proto.STRING, + number=3292052, + optional=True, + ) + next_page_token = proto.Field( + proto.STRING, + number=79797525, + optional=True, + ) + self_link = proto.Field( + proto.STRING, + number=456214797, + optional=True, + ) + warning = proto.Field( + proto.MESSAGE, + number=50704284, + optional=True, + message='Warning', + ) + + +class ZoneSetLabelsRequest(proto.Message): + r""" + + Attributes: + label_fingerprint (str): + The fingerprint of the previous set of labels + for this resource, used to detect conflicts. The + fingerprint is initially generated by Compute + Engine and changes after every request to modify + or update labels. You must always provide an up- + to-date fingerprint hash in order to update or + change labels. Make a get() request to the + resource to get the latest fingerprint. + + This field is a member of `oneof`_ ``_label_fingerprint``. + labels (Sequence[google.cloud.compute_v1.types.ZoneSetLabelsRequest.LabelsEntry]): + The labels to set for this resource. 
+ """ + + label_fingerprint = proto.Field( + proto.STRING, + number=178124825, + optional=True, + ) + labels = proto.MapField( + proto.STRING, + proto.STRING, + number=500195327, + ) + + +class ZoneSetPolicyRequest(proto.Message): + r""" + + Attributes: + bindings (Sequence[google.cloud.compute_v1.types.Binding]): + Flatten Policy to create a backwacd + compatible wire-format. Deprecated. Use 'policy' + to specify bindings. + etag (str): + Flatten Policy to create a backward + compatible wire-format. Deprecated. Use 'policy' + to specify the etag. + + This field is a member of `oneof`_ ``_etag``. + policy (google.cloud.compute_v1.types.Policy): + REQUIRED: The complete policy to be applied + to the 'resource'. The size of the policy is + limited to a few 10s of KB. An empty policy is + in general a valid policy but certain services + (like Projects) might reject them. + + This field is a member of `oneof`_ ``_policy``. + """ + + bindings = proto.RepeatedField( + proto.MESSAGE, + number=403251854, + message='Binding', + ) + etag = proto.Field( + proto.STRING, + number=3123477, + optional=True, + ) + policy = proto.Field( + proto.MESSAGE, + number=91071794, + optional=True, + message='Policy', + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/v1/mypy.ini b/owl-bot-staging/v1/mypy.ini new file mode 100644 index 000000000..4505b4854 --- /dev/null +++ b/owl-bot-staging/v1/mypy.ini @@ -0,0 +1,3 @@ +[mypy] +python_version = 3.6 +namespace_packages = True diff --git a/owl-bot-staging/v1/noxfile.py b/owl-bot-staging/v1/noxfile.py new file mode 100644 index 000000000..9d82b4402 --- /dev/null +++ b/owl-bot-staging/v1/noxfile.py @@ -0,0 +1,132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import pathlib +import shutil +import subprocess +import sys + + +import nox # type: ignore + +CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() + +LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" +PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8") + + +nox.sessions = [ + "unit", + "cover", + "mypy", + "check_lower_bounds" + # exclude update_lower_bounds from default + "docs", +] + +@nox.session(python=['3.6', '3.7', '3.8', '3.9', '3.10']) +def unit(session): + """Run the unit test suite.""" + + session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio') + session.install('-e', '.') + + session.run( + 'py.test', + '--quiet', + '--cov=google/cloud/compute_v1/', + '--cov-config=.coveragerc', + '--cov-report=term', + '--cov-report=html', + os.path.join('tests', 'unit', ''.join(session.posargs)) + ) + + +@nox.session(python='3.9') +def cover(session): + """Run the final coverage report. + This outputs the coverage report aggregating coverage from the unit + test runs (not system test runs), and then erases coverage data. 
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=['3.6', '3.7', '3.8', '3.9']) +def mypy(session): + """Run the type checker.""" + session.install('mypy', 'types-pkg_resources') + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python='3.9') +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx<3.0.0", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) diff --git a/owl-bot-staging/v1/scripts/fixup_compute_v1_keywords.py b/owl-bot-staging/v1/scripts/fixup_compute_v1_keywords.py new file mode 100644 index 000000000..1be5ad369 --- /dev/null +++ b/owl-bot-staging/v1/scripts/fixup_compute_v1_keywords.py @@ -0,0 +1,303 @@ +#! 
/usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class computeCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'abandon_instances': ('instance_group_manager', 'instance_group_managers_abandon_instances_request_resource', 'project', 'zone', 'request_id', ), + 'add_access_config': ('access_config_resource', 'instance', 'network_interface', 'project', 'zone', 'request_id', ), + 'add_association': ('firewall_policy', 'firewall_policy_association_resource', 'replace_existing_association', 'request_id', ), + 'add_health_check': ('project', 'region', 'target_pool', 'target_pools_add_health_check_request_resource', 'request_id', ), + 'add_instance': ('project', 'region', 'target_pool', 'target_pools_add_instance_request_resource', 'request_id', ), + 'add_instances': ('instance_group', 'instance_groups_add_instances_request_resource', 'project', 'zone', 
'request_id', ), + 'add_nodes': ('node_group', 'node_groups_add_nodes_request_resource', 'project', 'zone', 'request_id', ), + 'add_peering': ('network', 'networks_add_peering_request_resource', 'project', 'request_id', ), + 'add_resource_policies': ('disk', 'disks_add_resource_policies_request_resource', 'project', 'zone', 'request_id', ), + 'add_rule': ('firewall_policy', 'firewall_policy_rule_resource', 'request_id', ), + 'add_signed_url_key': ('backend_bucket', 'project', 'signed_url_key_resource', 'request_id', ), + 'aggregated_list': ('project', 'filter', 'include_all_scopes', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'apply_updates_to_instances': ('instance_group_manager', 'instance_group_managers_apply_updates_request_resource', 'project', 'zone', ), + 'attach_disk': ('attached_disk_resource', 'instance', 'project', 'zone', 'force_attach', 'request_id', ), + 'attach_network_endpoints': ('global_network_endpoint_groups_attach_endpoints_request_resource', 'network_endpoint_group', 'project', 'request_id', ), + 'bulk_insert': ('bulk_insert_instance_resource_resource', 'project', 'zone', 'request_id', ), + 'clone_rules': ('firewall_policy', 'request_id', 'source_firewall_policy', ), + 'create_instances': ('instance_group_manager', 'instance_group_managers_create_instances_request_resource', 'project', 'zone', 'request_id', ), + 'create_snapshot': ('disk', 'project', 'snapshot_resource', 'zone', 'guest_flush', 'request_id', ), + 'delete': ('address', 'project', 'region', 'request_id', ), + 'delete_access_config': ('access_config', 'instance', 'network_interface', 'project', 'zone', 'request_id', ), + 'delete_instances': ('instance_group_manager', 'instance_group_managers_delete_instances_request_resource', 'project', 'zone', 'request_id', ), + 'delete_nodes': ('node_group', 'node_groups_delete_nodes_request_resource', 'project', 'zone', 'request_id', ), + 'delete_per_instance_configs': ('instance_group_manager', 
'instance_group_managers_delete_per_instance_configs_req_resource', 'project', 'zone', ), + 'delete_signed_url_key': ('backend_bucket', 'key_name', 'project', 'request_id', ), + 'deprecate': ('deprecation_status_resource', 'image', 'project', 'request_id', ), + 'detach_disk': ('device_name', 'instance', 'project', 'zone', 'request_id', ), + 'detach_network_endpoints': ('global_network_endpoint_groups_detach_endpoints_request_resource', 'network_endpoint_group', 'project', 'request_id', ), + 'disable_xpn_host': ('project', 'request_id', ), + 'disable_xpn_resource': ('project', 'projects_disable_xpn_resource_request_resource', 'request_id', ), + 'enable_xpn_host': ('project', 'request_id', ), + 'enable_xpn_resource': ('project', 'projects_enable_xpn_resource_request_resource', 'request_id', ), + 'expand_ip_cidr_range': ('project', 'region', 'subnetwork', 'subnetworks_expand_ip_cidr_range_request_resource', 'request_id', ), + 'get': ('accelerator_type', 'project', 'zone', ), + 'get_association': ('firewall_policy', 'name', ), + 'get_diagnostics': ('interconnect', 'project', ), + 'get_effective_firewalls': ('instance', 'network_interface', 'project', 'zone', ), + 'get_from_family': ('family', 'project', ), + 'get_guest_attributes': ('instance', 'project', 'zone', 'query_path', 'variable_key', ), + 'get_health': ('backend_service', 'project', 'resource_group_reference_resource', ), + 'get_iam_policy': ('project', 'resource', 'zone', 'options_requested_policy_version', ), + 'get_nat_mapping_info': ('project', 'region', 'router', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'get_router_status': ('project', 'region', 'router', ), + 'get_rule': ('firewall_policy', 'priority', ), + 'get_screenshot': ('instance', 'project', 'zone', ), + 'get_serial_port_output': ('instance', 'project', 'zone', 'port', 'start', ), + 'get_shielded_instance_identity': ('instance', 'project', 'zone', ), + 'get_status': ('project', 'region', 'vpn_gateway', ), + 
'get_xpn_host': ('project', ), + 'get_xpn_resources': ('project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'insert': ('address_resource', 'project', 'region', 'request_id', ), + 'invalidate_cache': ('cache_invalidation_rule_resource', 'project', 'url_map', 'request_id', ), + 'list': ('project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_associations': ('target_resource', ), + 'list_available_features': ('project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_errors': ('instance_group_manager', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_instances': ('instance_group', 'instance_groups_list_instances_request_resource', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_managed_instances': ('instance_group_manager', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_network_endpoints': ('network_endpoint_group', 'project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_nodes': ('node_group', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_peering_routes': ('network', 'project', 'direction', 'filter', 'max_results', 'order_by', 'page_token', 'peering_name', 'region', 'return_partial_success', ), + 'list_per_instance_configs': ('instance_group_manager', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_preconfigured_expression_sets': ('project', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_referrers': ('instance', 'project', 'zone', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'list_usable': ('project', 'filter', 'max_results', 
'order_by', 'page_token', 'return_partial_success', ), + 'list_xpn_hosts': ('project', 'projects_list_xpn_hosts_request_resource', 'filter', 'max_results', 'order_by', 'page_token', 'return_partial_success', ), + 'move': ('firewall_policy', 'parent_id', 'request_id', ), + 'move_disk': ('disk_move_request_resource', 'project', 'request_id', ), + 'move_instance': ('instance_move_request_resource', 'project', 'request_id', ), + 'patch': ('autoscaler_resource', 'project', 'zone', 'autoscaler', 'request_id', ), + 'patch_per_instance_configs': ('instance_group_manager', 'instance_group_managers_patch_per_instance_configs_req_resource', 'project', 'zone', 'request_id', ), + 'patch_rule': ('firewall_policy', 'firewall_policy_rule_resource', 'priority', 'request_id', ), + 'preview': ('project', 'region', 'router', 'router_resource', ), + 'recreate_instances': ('instance_group_manager', 'instance_group_managers_recreate_instances_request_resource', 'project', 'zone', 'request_id', ), + 'remove_association': ('firewall_policy', 'name', 'request_id', ), + 'remove_health_check': ('project', 'region', 'target_pool', 'target_pools_remove_health_check_request_resource', 'request_id', ), + 'remove_instance': ('project', 'region', 'target_pool', 'target_pools_remove_instance_request_resource', 'request_id', ), + 'remove_instances': ('instance_group', 'instance_groups_remove_instances_request_resource', 'project', 'zone', 'request_id', ), + 'remove_peering': ('network', 'networks_remove_peering_request_resource', 'project', 'request_id', ), + 'remove_resource_policies': ('disk', 'disks_remove_resource_policies_request_resource', 'project', 'zone', 'request_id', ), + 'remove_rule': ('firewall_policy', 'priority', 'request_id', ), + 'reset': ('instance', 'project', 'zone', 'request_id', ), + 'resize': ('disk', 'disks_resize_request_resource', 'project', 'zone', 'request_id', ), + 'send_diagnostic_interrupt': ('instance', 'project', 'zone', ), + 'set_backend_service': ('project', 
'target_ssl_proxies_set_backend_service_request_resource', 'target_ssl_proxy', 'request_id', ), + 'set_backup': ('project', 'region', 'target_pool', 'target_reference_resource', 'failover_ratio', 'request_id', ), + 'set_common_instance_metadata': ('metadata_resource', 'project', 'request_id', ), + 'set_default_network_tier': ('project', 'projects_set_default_network_tier_request_resource', 'request_id', ), + 'set_deletion_protection': ('project', 'resource', 'zone', 'deletion_protection', 'request_id', ), + 'set_disk_auto_delete': ('auto_delete', 'device_name', 'instance', 'project', 'zone', 'request_id', ), + 'set_iam_policy': ('project', 'resource', 'zone', 'zone_set_policy_request_resource', ), + 'set_instance_template': ('instance_group_manager', 'instance_group_managers_set_instance_template_request_resource', 'project', 'zone', 'request_id', ), + 'set_labels': ('project', 'resource', 'zone', 'zone_set_labels_request_resource', 'request_id', ), + 'set_machine_resources': ('instance', 'instances_set_machine_resources_request_resource', 'project', 'zone', 'request_id', ), + 'set_machine_type': ('instance', 'instances_set_machine_type_request_resource', 'project', 'zone', 'request_id', ), + 'set_metadata': ('instance', 'metadata_resource', 'project', 'zone', 'request_id', ), + 'set_min_cpu_platform': ('instance', 'instances_set_min_cpu_platform_request_resource', 'project', 'zone', 'request_id', ), + 'set_named_ports': ('instance_group', 'instance_groups_set_named_ports_request_resource', 'project', 'zone', 'request_id', ), + 'set_node_template': ('node_group', 'node_groups_set_node_template_request_resource', 'project', 'zone', 'request_id', ), + 'set_private_ip_google_access': ('project', 'region', 'subnetwork', 'subnetworks_set_private_ip_google_access_request_resource', 'request_id', ), + 'set_proxy_header': ('project', 'target_ssl_proxies_set_proxy_header_request_resource', 'target_ssl_proxy', 'request_id', ), + 'set_quic_override': ('project', 
'target_https_proxies_set_quic_override_request_resource', 'target_https_proxy', 'request_id', ), + 'set_scheduling': ('instance', 'project', 'scheduling_resource', 'zone', 'request_id', ), + 'set_security_policy': ('backend_service', 'project', 'security_policy_reference_resource', 'request_id', ), + 'set_service_account': ('instance', 'instances_set_service_account_request_resource', 'project', 'zone', 'request_id', ), + 'set_shielded_instance_integrity_policy': ('instance', 'project', 'shielded_instance_integrity_policy_resource', 'zone', 'request_id', ), + 'set_ssl_certificates': ('project', 'region', 'region_target_https_proxies_set_ssl_certificates_request_resource', 'target_https_proxy', 'request_id', ), + 'set_ssl_policy': ('project', 'ssl_policy_reference_resource', 'target_https_proxy', 'request_id', ), + 'set_tags': ('instance', 'project', 'tags_resource', 'zone', 'request_id', ), + 'set_target': ('forwarding_rule', 'project', 'region', 'target_reference_resource', 'request_id', ), + 'set_target_pools': ('instance_group_manager', 'instance_group_managers_set_target_pools_request_resource', 'project', 'zone', 'request_id', ), + 'set_url_map': ('project', 'region', 'target_http_proxy', 'url_map_reference_resource', 'request_id', ), + 'set_usage_export_bucket': ('project', 'usage_export_location_resource', 'request_id', ), + 'simulate_maintenance_event': ('instance', 'project', 'zone', ), + 'start': ('instance', 'project', 'zone', 'request_id', ), + 'start_with_encryption_key': ('instance', 'instances_start_with_encryption_key_request_resource', 'project', 'zone', 'request_id', ), + 'stop': ('instance', 'project', 'zone', 'request_id', ), + 'switch_to_custom_mode': ('network', 'project', 'request_id', ), + 'test_iam_permissions': ('project', 'resource', 'test_permissions_request_resource', 'zone', ), + 'update': ('autoscaler_resource', 'project', 'zone', 'autoscaler', 'request_id', ), + 'update_access_config': ('access_config_resource', 'instance', 
'network_interface', 'project', 'zone', 'request_id', ), + 'update_display_device': ('display_device_resource', 'instance', 'project', 'zone', 'request_id', ), + 'update_network_interface': ('instance', 'network_interface', 'network_interface_resource', 'project', 'zone', 'request_id', ), + 'update_peering': ('network', 'networks_update_peering_request_resource', 'project', 'request_id', ), + 'update_per_instance_configs': ('instance_group_manager', 'instance_group_managers_update_per_instance_configs_req_resource', 'project', 'zone', 'request_id', ), + 'update_shielded_instance_config': ('instance', 'project', 'shielded_instance_config_resource', 'zone', 'request_id', ), + 'validate': ('project', 'region', 'region_url_maps_validate_request_resource', 'url_map', ), + 'wait': ('operation', 'project', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. + args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. 
+ return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), +cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=computeCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the compute client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. 
+ +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/v1/setup.py b/owl-bot-staging/v1/setup.py new file mode 100644 index 000000000..8471b8671 --- /dev/null +++ b/owl-bot-staging/v1/setup.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os +import setuptools # type: ignore + +version = '0.1.0' + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, 'README.rst') +with io.open(readme_filename, encoding='utf-8') as readme_file: + readme = readme_file.read() + +setuptools.setup( + name='google-cloud-compute', + version=version, + long_description=readme, + packages=setuptools.PEP420PackageFinder.find(), + namespace_packages=('google', 'google.cloud'), + platforms='Posix; MacOS X; Windows', + include_package_data=True, + install_requires=( + 'google-api-core[grpc] >= 2.2.0, < 3.0.0dev', + 'libcst >= 0.2.5', + 'proto-plus >= 1.19.7', + ), + python_requires='>=3.6', + classifiers=[ + 'Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Topic :: Internet', + 'Topic :: Software Development :: Libraries :: Python Modules', + ], + zip_safe=False, +) diff --git a/owl-bot-staging/v1/tests/__init__.py b/owl-bot-staging/v1/tests/__init__.py new file mode 100644 index 000000000..b54a5fcc4 --- /dev/null +++ b/owl-bot-staging/v1/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/__init__.py b/owl-bot-staging/v1/tests/unit/__init__.py new file mode 100644 index 000000000..b54a5fcc4 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/__init__.py new file mode 100644 index 000000000..b54a5fcc4 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/__init__.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/__init__.py new file mode 100644 index 000000000..b54a5fcc4 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_accelerator_types.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_accelerator_types.py new file mode 100644 index 000000000..4bc4da264 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_accelerator_types.py @@ -0,0 +1,1122 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.accelerator_types import AcceleratorTypesClient +from google.cloud.compute_v1.services.accelerator_types import pagers +from google.cloud.compute_v1.services.accelerator_types import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AcceleratorTypesClient._get_default_mtls_endpoint(None) is None + assert AcceleratorTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert AcceleratorTypesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert AcceleratorTypesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert AcceleratorTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert AcceleratorTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + AcceleratorTypesClient, +]) +def test_accelerator_types_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.AcceleratorTypesRestTransport, "rest"), +]) +def test_accelerator_types_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + AcceleratorTypesClient, +]) +def test_accelerator_types_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_accelerator_types_client_get_transport_class(): + transport = AcceleratorTypesClient.get_transport_class() + available_transports = [ + transports.AcceleratorTypesRestTransport, + ] + assert transport in available_transports + + transport = AcceleratorTypesClient.get_transport_class("rest") + assert transport == transports.AcceleratorTypesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AcceleratorTypesClient, transports.AcceleratorTypesRestTransport, "rest"), +]) +@mock.patch.object(AcceleratorTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AcceleratorTypesClient)) +def test_accelerator_types_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(AcceleratorTypesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AcceleratorTypesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (AcceleratorTypesClient, transports.AcceleratorTypesRestTransport, "rest", "true"), + (AcceleratorTypesClient, transports.AcceleratorTypesRestTransport, "rest", "false"), +]) +@mock.patch.object(AcceleratorTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AcceleratorTypesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_accelerator_types_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AcceleratorTypesClient, transports.AcceleratorTypesRestTransport, "rest"), +]) +def test_accelerator_types_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AcceleratorTypesClient, transports.AcceleratorTypesRestTransport, "rest"), +]) +def test_accelerator_types_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListAcceleratorTypesRequest): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.AcceleratorTypeAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AcceleratorTypeAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListAcceleratorTypesRequest): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.aggregated_list(request)
+
+
+# Re-run the main aggregated_list test with a plain dict request to confirm
+# the client accepts dict-typed requests as well as proto request objects.
+def test_aggregated_list_rest_from_dict():
+    test_aggregated_list_rest(request_type=dict)
+
+
+# Verify that flattened (keyword) arguments are accepted and produce an HTTP
+# call whose URL matches the service's transcoding rule.
+def test_aggregated_list_rest_flattened(transport: str = 'rest'):
+    client = AcceleratorTypesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.AcceleratorTypeAggregatedList()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.AcceleratorTypeAggregatedList.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+        )
+        mock_args.update(sample_request)
+        client.aggregated_list(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/acceleratorTypes" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListAcceleratorTypesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.AcceleratorTypeAggregatedList( + items={ + 'a':compute.AcceleratorTypesScopedList(), + 'b':compute.AcceleratorTypesScopedList(), + 'c':compute.AcceleratorTypesScopedList(), + }, + next_page_token='abc', + ), + compute.AcceleratorTypeAggregatedList( + items={}, + next_page_token='def', + ), + compute.AcceleratorTypeAggregatedList( + items={ + 'g':compute.AcceleratorTypesScopedList(), + }, + next_page_token='ghi', + ), + compute.AcceleratorTypeAggregatedList( + items={ + 'h':compute.AcceleratorTypesScopedList(), + 'i':compute.AcceleratorTypesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.AcceleratorTypeAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): 
+ return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.AcceleratorTypesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.AcceleratorTypesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.AcceleratorTypesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetAcceleratorTypeRequest): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "accelerator_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.AcceleratorType( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + maximum_cards_per_instance=2756, + name='name_value', + self_link='self_link_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AcceleratorType.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.AcceleratorType) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.maximum_cards_per_instance == 2756 + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetAcceleratorTypeRequest): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "accelerator_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get(request)
+
+
+# Re-run the main get test with a plain dict request to confirm the client
+# accepts dict-typed requests as well as proto request objects.
+def test_get_rest_from_dict():
+    test_get_rest(request_type=dict)
+
+
+# Verify that flattened (keyword) arguments are accepted and produce an HTTP
+# call whose URL matches the service's transcoding rule.
+def test_get_rest_flattened(transport: str = 'rest'):
+    client = AcceleratorTypesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.AcceleratorType()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.AcceleratorType.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "zone": "sample2", "accelerator_type": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            zone='zone_value',
+            accelerator_type='accelerator_type_value',
+        )
+        mock_args.update(sample_request)
+        client.get(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/acceleratorTypes/{accelerator_type}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetAcceleratorTypeRequest(), + project='project_value', + zone='zone_value', + accelerator_type='accelerator_type_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListAcceleratorTypesRequest): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AcceleratorTypeList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AcceleratorTypeList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListAcceleratorTypesRequest): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.AcceleratorTypeList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AcceleratorTypeList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/acceleratorTypes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListAcceleratorTypesRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.AcceleratorTypeList( + items=[ + compute.AcceleratorType(), + compute.AcceleratorType(), + compute.AcceleratorType(), + ], + next_page_token='abc', + ), + compute.AcceleratorTypeList( + items=[], + next_page_token='def', + ), + compute.AcceleratorTypeList( + items=[ + compute.AcceleratorType(), + ], + next_page_token='ghi', + ), + compute.AcceleratorTypeList( + items=[ + compute.AcceleratorType(), + compute.AcceleratorType(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.AcceleratorTypeList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.AcceleratorType) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AcceleratorTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+    transport = transports.AcceleratorTypesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = AcceleratorTypesClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.AcceleratorTypesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = AcceleratorTypesClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+# A ready-made transport instance may be supplied directly; the client must
+# adopt that exact object rather than constructing its own.
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.AcceleratorTypesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = AcceleratorTypesClient(transport=transport)
+    assert client.transport is transport
+
+
+# When no credentials are given, the transport must fall back to Application
+# Default Credentials (google.auth.default) exactly once.
+@pytest.mark.parametrize("transport_class", [
+    transports.AcceleratorTypesRestTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+
+# Supplying both a credentials object and a credentials_file is ambiguous and
+# must be rejected with DuplicateCredentialArgs.
+def test_accelerator_types_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.AcceleratorTypesTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_accelerator_types_base_transport():
+    # Instantiate the base transport.
+ with mock.patch('google.cloud.compute_v1.services.accelerator_types.transports.AcceleratorTypesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.AcceleratorTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_accelerator_types_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.accelerator_types.transports.AcceleratorTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AcceleratorTypesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_accelerator_types_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.accelerator_types.transports.AcceleratorTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AcceleratorTypesTransport() + adc.assert_called_once() + + +def test_accelerator_types_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AcceleratorTypesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_accelerator_types_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.AcceleratorTypesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_accelerator_types_host_no_port(): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_accelerator_types_host_with_port(): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def 
test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = AcceleratorTypesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = AcceleratorTypesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = AcceleratorTypesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = AcceleratorTypesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = AcceleratorTypesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = AcceleratorTypesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = AcceleratorTypesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = AcceleratorTypesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+    actual = AcceleratorTypesClient.parse_common_organization_path(path)
+    assert expected == actual
+
+# Round-trip test for the "projects/{project}" common resource path helper:
+# building a path and parsing it back must recover the original components.
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = AcceleratorTypesClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = AcceleratorTypesClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = AcceleratorTypesClient.parse_common_project_path(path)
+    assert expected == actual
+
+# Round-trip test for the "projects/{project}/locations/{location}" helper.
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = AcceleratorTypesClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = AcceleratorTypesClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = AcceleratorTypesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.AcceleratorTypesTransport, '_prep_wrapped_messages') as prep: + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.AcceleratorTypesTransport, '_prep_wrapped_messages') as prep: + transport_class = AcceleratorTypesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = AcceleratorTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_addresses.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_addresses.py new file mode 100644 index 000000000..d523f66db --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_addresses.py @@ -0,0 +1,1444 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.addresses import AddressesClient +from google.cloud.compute_v1.services.addresses import pagers +from google.cloud.compute_v1.services.addresses import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert AddressesClient._get_default_mtls_endpoint(None) is None + assert AddressesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert AddressesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert AddressesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert AddressesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert AddressesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + AddressesClient, +]) +def test_addresses_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.AddressesRestTransport, "rest"), +]) +def test_addresses_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) 
+ + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + AddressesClient, +]) +def test_addresses_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_addresses_client_get_transport_class(): + transport = AddressesClient.get_transport_class() + available_transports = [ + transports.AddressesRestTransport, + ] + assert transport in available_transports + + transport = AddressesClient.get_transport_class("rest") + assert transport == transports.AddressesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AddressesClient, transports.AddressesRestTransport, "rest"), +]) +@mock.patch.object(AddressesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AddressesClient)) +def test_addresses_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(AddressesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AddressesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (AddressesClient, transports.AddressesRestTransport, "rest", "true"), + (AddressesClient, transports.AddressesRestTransport, "rest", "false"), +]) +@mock.patch.object(AddressesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AddressesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_addresses_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AddressesClient, transports.AddressesRestTransport, "rest"), +]) +def test_addresses_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AddressesClient, transports.AddressesRestTransport, "rest"), +]) +def test_addresses_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListAddressesRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.AddressAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AddressAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListAddressesRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AddressAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AddressAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/addresses" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListAddressesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.AddressAggregatedList( + items={ + 'a':compute.AddressesScopedList(), + 'b':compute.AddressesScopedList(), + 'c':compute.AddressesScopedList(), + }, + next_page_token='abc', + ), + compute.AddressAggregatedList( + items={}, + next_page_token='def', + ), + compute.AddressAggregatedList( + items={ + 'g':compute.AddressesScopedList(), + }, + next_page_token='ghi', + ), + compute.AddressAggregatedList( + items={ + 'h':compute.AddressesScopedList(), + 'i':compute.AddressesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.AddressAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.AddressesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.AddressesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.AddressesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteAddressRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # 
send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "address": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteAddressRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "address": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "address": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + address='address_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/addresses/{address}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteAddressRequest(), + project='project_value', + region='region_value', + address='address_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetAddressRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "address": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Address( + address='address_value', + address_type='address_type_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + ip_version='ip_version_value', + kind='kind_value', + name='name_value', + network='network_value', + network_tier='network_tier_value', + prefix_length=1391, + purpose='purpose_value', + region='region_value', + self_link='self_link_value', + status='status_value', + subnetwork='subnetwork_value', + users=['users_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Address.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Address) + assert response.address == 'address_value' + assert response.address_type == 'address_type_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.ip_version == 'ip_version_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.network_tier == 'network_tier_value' + assert response.prefix_length == 1391 + assert response.purpose == 'purpose_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.subnetwork == 'subnetwork_value' + assert response.users == ['users_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetAddressRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "address": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Address() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Address.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "address": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + address='address_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/addresses/{address}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetAddressRequest(), + project='project_value', + region='region_value', + address='address_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertAddressRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["address_resource"] = compute.Address(address='address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertAddressRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["address_resource"] = compute.Address(address='address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + address_resource=compute.Address(address='address_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/addresses" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertAddressRequest(), + project='project_value', + region='region_value', + address_resource=compute.Address(address='address_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListAddressesRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AddressList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AddressList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListAddressesRequest): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AddressList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AddressList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/addresses" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListAddressesRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.AddressList( + items=[ + compute.Address(), + compute.Address(), + compute.Address(), + ], + next_page_token='abc', + ), + compute.AddressList( + items=[], + next_page_token='def', + ), + compute.AddressList( + items=[ + compute.Address(), + ], + next_page_token='ghi', + ), + compute.AddressList( + items=[ + compute.Address(), + compute.Address(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.AddressList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Address) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.AddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AddressesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AddressesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.AddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = AddressesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.AddressesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_addresses_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.AddressesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_addresses_base_transport(): + # Instantiate the base transport. 
    with mock.patch('google.cloud.compute_v1.services.addresses.transports.AddressesTransport.__init__') as Transport:
        # Patch __init__ so the abstract base transport can be instantiated
        # without any real credential plumbing.
        Transport.return_value = None
        transport = transports.AddressesTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'aggregated_list',
        'delete',
        'get',
        'insert',
        'list',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    # close() is likewise abstract on the base transport.
    with pytest.raises(NotImplementedError):
        transport.close()


def test_addresses_base_transport_with_credentials_file():
    """Credentials files must be loaded with the compute default scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.addresses.transports.AddressesTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.AddressesTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        # The file path, default scopes, and quota project must all be
        # forwarded verbatim to google.auth.
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/compute',
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            quota_project_id="octopus",
        )


def test_addresses_base_transport_with_adc():
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.addresses.transports.AddressesTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.AddressesTransport()
        adc.assert_called_once()


def test_addresses_auth_adc():
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        AddressesClient()
        # ADC must be queried with the compute default scopes.
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/compute',
                'https://www.googleapis.com/auth/cloud-platform',
            ),
            quota_project_id=None,
        )


def test_addresses_http_transport_client_cert_source_for_mtls():
    # The REST transport must hand the client cert source to the
    # AuthorizedSession's mTLS configuration hook.
    cred = ga_credentials.AnonymousCredentials()
    with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
        transports.AddressesRestTransport (
            credentials=cred,
            client_cert_source_for_mtls=client_cert_source_callback
        )
        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)


def test_addresses_host_no_port():
    # An api_endpoint without an explicit port defaults to :443.
    client = AddressesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'),
    )
    assert client.transport._host == 'compute.googleapis.com:443'


def test_addresses_host_with_port():
    # An explicit port in the api_endpoint is preserved.
    client = AddressesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'),
    )
    assert client.transport._host == 'compute.googleapis.com:8000'


def test_common_billing_account_path():
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = AddressesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = AddressesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = AddressesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = AddressesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = AddressesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = AddressesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = AddressesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = AddressesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = AddressesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = AddressesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = AddressesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AddressesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = AddressesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = AddressesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = AddressesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.AddressesTransport, '_prep_wrapped_messages') as prep: + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.AddressesTransport, '_prep_wrapped_messages') as prep: + transport_class = AddressesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = AddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_autoscalers.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_autoscalers.py new file mode 100644 index 000000000..dfeddbc4b --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_autoscalers.py @@ -0,0 +1,1740 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import os
import mock

import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule

from requests import Response
from requests import Request
from requests.sessions import Session

from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.autoscalers import AutoscalersClient
from google.cloud.compute_v1.services.autoscalers import pagers
from google.cloud.compute_v1.services.autoscalers import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth


def client_cert_source_callback():
    # Dummy mTLS client-certificate source used by the mTLS tests below.
    # The bytes are placeholders; they are only compared by identity/value,
    # never parsed as a real certificate.
    return b"cert bytes", b"key bytes"


# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-localhost stand-in for *client*'s default endpoint.

    When DEFAULT_ENDPOINT points at localhost the derived mTLS endpoint
    would be identical, so substitute a distinct hostname for the
    endpoint-switching tests.
    """
    if "localhost" in client.DEFAULT_ENDPOINT:
        return "foo.googleapis.com"
    return client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts '.mtls' only for googleapis hosts."""
    plain = "example.googleapis.com"
    plain_mtls = "example.mtls.googleapis.com"
    sandbox = "example.sandbox.googleapis.com"
    sandbox_mtls = "example.mtls.sandbox.googleapis.com"
    external = "api.example.com"

    assert AutoscalersClient._get_default_mtls_endpoint(None) is None
    for given, wanted in (
        (plain, plain_mtls),
        (plain_mtls, plain_mtls),
        (sandbox, sandbox_mtls),
        (sandbox_mtls, sandbox_mtls),
        (external, external),
    ):
        assert AutoscalersClient._get_default_mtls_endpoint(given) == wanted


@pytest.mark.parametrize("client_class", [
    AutoscalersClient,
])
def test_autoscalers_client_from_service_account_info(client_class):
    """from_service_account_info wraps the factory's credentials in a client."""
    fake_creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = fake_creds
        client = client_class.from_service_account_info({"valid": True})
        assert client.transport._credentials == fake_creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'compute.googleapis.com:443'


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.AutoscalersRestTransport, "rest"),
])
def test_autoscalers_client_service_account_always_use_jwt(transport_class, transport_name):
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + AutoscalersClient, +]) +def test_autoscalers_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_autoscalers_client_get_transport_class(): + transport = AutoscalersClient.get_transport_class() + available_transports = [ + transports.AutoscalersRestTransport, + ] + assert transport in available_transports + + transport = AutoscalersClient.get_transport_class("rest") + assert transport == transports.AutoscalersRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalersClient, transports.AutoscalersRestTransport, "rest"), +]) +@mock.patch.object(AutoscalersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalersClient)) +def test_autoscalers_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(AutoscalersClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(AutoscalersClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (AutoscalersClient, transports.AutoscalersRestTransport, "rest", "true"), + (AutoscalersClient, transports.AutoscalersRestTransport, "rest", "false"), +]) +@mock.patch.object(AutoscalersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(AutoscalersClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_autoscalers_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalersClient, transports.AutoscalersRestTransport, "rest"), +]) +def test_autoscalers_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (AutoscalersClient, transports.AutoscalersRestTransport, "rest"), +]) +def test_autoscalers_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListAutoscalersRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.AutoscalerAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AutoscalerAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListAutoscalersRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AutoscalerAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AutoscalerAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/autoscalers" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListAutoscalersRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.AutoscalerAggregatedList( + items={ + 'a':compute.AutoscalersScopedList(), + 'b':compute.AutoscalersScopedList(), + 'c':compute.AutoscalersScopedList(), + }, + next_page_token='abc', + ), + compute.AutoscalerAggregatedList( + items={}, + next_page_token='def', + ), + compute.AutoscalerAggregatedList( + items={ + 'g':compute.AutoscalersScopedList(), + }, + next_page_token='ghi', + ), + compute.AutoscalerAggregatedList( + items={ + 'h':compute.AutoscalersScopedList(), + 'i':compute.AutoscalersScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.AutoscalerAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.AutoscalersScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.AutoscalersScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.AutoscalersScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "autoscaler": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + autoscaler='autoscaler_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/autoscalers/{autoscaler}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteAutoscalerRequest(), + project='project_value', + zone='zone_value', + autoscaler='autoscaler_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Autoscaler( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + recommended_size=1693, + region='region_value', + self_link='self_link_value', + status='status_value', + target='target_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Autoscaler.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Autoscaler) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.recommended_size == 1693 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.target == 'target_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Autoscaler() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Autoscaler.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "autoscaler": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + autoscaler='autoscaler_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/autoscalers/{autoscaler}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetAutoscalerRequest(), + project='project_value', + zone='zone_value', + autoscaler='autoscaler_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/autoscalers" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertAutoscalerRequest(), + project='project_value', + zone='zone_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListAutoscalersRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AutoscalerList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AutoscalerList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListAutoscalersRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.AutoscalerList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AutoscalerList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/autoscalers" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListAutoscalersRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.AutoscalerList( + items=[ + compute.Autoscaler(), + compute.Autoscaler(), + compute.Autoscaler(), + ], + next_page_token='abc', + ), + compute.AutoscalerList( + items=[], + next_page_token='def', + ), + compute.AutoscalerList( + items=[ + compute.Autoscaler(), + ], + next_page_token='ghi', + ), + compute.AutoscalerList( + items=[ + compute.Autoscaler(), + compute.Autoscaler(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.AutoscalerList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Autoscaler) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/autoscalers" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchAutoscalerRequest(), + project='project_value', + zone='zone_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateAutoscalerRequest): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/autoscalers" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateAutoscalerRequest(), + project='project_value', + zone='zone_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.AutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.AutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalersClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.AutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = AutoscalersClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.AutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = AutoscalersClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.AutoscalersRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_autoscalers_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.AutoscalersTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_autoscalers_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.autoscalers.transports.AutoscalersTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.AutoscalersTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'update', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_autoscalers_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.autoscalers.transports.AutoscalersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalersTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_autoscalers_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.autoscalers.transports.AutoscalersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.AutoscalersTransport() + adc.assert_called_once() + + +def test_autoscalers_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + AutoscalersClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_autoscalers_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.AutoscalersRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_autoscalers_host_no_port(): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_autoscalers_host_with_port(): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = AutoscalersClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = AutoscalersClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AutoscalersClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = AutoscalersClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = AutoscalersClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalersClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = AutoscalersClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = AutoscalersClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalersClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = AutoscalersClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = AutoscalersClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = AutoscalersClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = AutoscalersClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = AutoscalersClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = AutoscalersClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.AutoscalersTransport, '_prep_wrapped_messages') as prep: + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.AutoscalersTransport, '_prep_wrapped_messages') as prep: + transport_class = AutoscalersClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = AutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_backend_buckets.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_backend_buckets.py new file mode 100644 index 000000000..f0e9a3129 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_backend_buckets.py @@ -0,0 +1,1849 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.backend_buckets import BackendBucketsClient +from google.cloud.compute_v1.services.backend_buckets import pagers +from google.cloud.compute_v1.services.backend_buckets import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BackendBucketsClient._get_default_mtls_endpoint(None) is None + assert BackendBucketsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert BackendBucketsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert BackendBucketsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert BackendBucketsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert BackendBucketsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + BackendBucketsClient, +]) +def test_backend_buckets_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.BackendBucketsRestTransport, "rest"), +]) +def test_backend_buckets_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + BackendBucketsClient, +]) +def test_backend_buckets_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_backend_buckets_client_get_transport_class(): + transport = BackendBucketsClient.get_transport_class() + available_transports = [ + transports.BackendBucketsRestTransport, + ] + assert transport in available_transports + + transport = BackendBucketsClient.get_transport_class("rest") + assert transport == transports.BackendBucketsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BackendBucketsClient, transports.BackendBucketsRestTransport, "rest"), +]) +@mock.patch.object(BackendBucketsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BackendBucketsClient)) +def test_backend_buckets_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(BackendBucketsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BackendBucketsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (BackendBucketsClient, transports.BackendBucketsRestTransport, "rest", "true"), + (BackendBucketsClient, transports.BackendBucketsRestTransport, "rest", "false"), +]) +@mock.patch.object(BackendBucketsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BackendBucketsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_backend_buckets_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BackendBucketsClient, transports.BackendBucketsRestTransport, "rest"), +]) +def test_backend_buckets_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BackendBucketsClient, transports.BackendBucketsRestTransport, "rest"), +]) +def test_backend_buckets_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_signed_url_key_rest(transport: str = 'rest', request_type=compute.AddSignedUrlKeyBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request_init["signed_url_key_resource"] = compute.SignedUrlKey(key_name='key_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_signed_url_key(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_signed_url_key_rest_bad_request(transport: str = 'rest', request_type=compute.AddSignedUrlKeyBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request_init["signed_url_key_resource"] = compute.SignedUrlKey(key_name='key_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_signed_url_key(request) + + +def test_add_signed_url_key_rest_from_dict(): + test_add_signed_url_key_rest(request_type=dict) + + +def test_add_signed_url_key_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_bucket": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_bucket='backend_bucket_value', + signed_url_key_resource=compute.SignedUrlKey(key_name='key_name_value'), + ) + mock_args.update(sample_request) + client.add_signed_url_key(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}/addSignedUrlKey" % client.transport._host, args[1]) + + +def test_add_signed_url_key_rest_flattened_error(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_signed_url_key( + compute.AddSignedUrlKeyBackendBucketRequest(), + project='project_value', + backend_bucket='backend_bucket_value', + signed_url_key_resource=compute.SignedUrlKey(key_name='key_name_value'), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_bucket": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_bucket='backend_bucket_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteBackendBucketRequest(), + project='project_value', + backend_bucket='backend_bucket_value', + ) + + +def test_delete_signed_url_key_rest(transport: str = 'rest', request_type=compute.DeleteSignedUrlKeyBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_signed_url_key(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_signed_url_key_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteSignedUrlKeyBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_signed_url_key(request) + + +def test_delete_signed_url_key_rest_from_dict(): + test_delete_signed_url_key_rest(request_type=dict) + + +def test_delete_signed_url_key_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_bucket": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_bucket='backend_bucket_value', + key_name='key_name_value', + ) + mock_args.update(sample_request) + client.delete_signed_url_key(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}/deleteSignedUrlKey" % client.transport._host, args[1]) + + +def test_delete_signed_url_key_rest_flattened_error(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_signed_url_key( + compute.DeleteSignedUrlKeyBackendBucketRequest(), + project='project_value', + backend_bucket='backend_bucket_value', + key_name='key_name_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.BackendBucket( + bucket_name='bucket_name_value', + creation_timestamp='creation_timestamp_value', + custom_response_headers=['custom_response_headers_value'], + description='description_value', + enable_cdn=True, + id=205, + kind='kind_value', + name='name_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendBucket.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.BackendBucket) + assert response.bucket_name == 'bucket_name_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.custom_response_headers == ['custom_response_headers_value'] + assert response.description == 'description_value' + assert response.enable_cdn is True + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendBucket() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendBucket.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_bucket": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_bucket='backend_bucket_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetBackendBucketRequest(), + project='project_value', + backend_bucket='backend_bucket_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["backend_bucket_resource"] = compute.BackendBucket(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["backend_bucket_resource"] = compute.BackendBucket(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_bucket_resource=compute.BackendBucket(bucket_name='bucket_name_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertBackendBucketRequest(), + project='project_value', + backend_bucket_resource=compute.BackendBucket(bucket_name='bucket_name_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListBackendBucketsRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendBucketList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendBucketList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListBackendBucketsRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendBucketList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendBucketList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListBackendBucketsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.BackendBucketList( + items=[ + compute.BackendBucket(), + compute.BackendBucket(), + compute.BackendBucket(), + ], + next_page_token='abc', + ), + compute.BackendBucketList( + items=[], + next_page_token='def', + ), + compute.BackendBucketList( + items=[ + compute.BackendBucket(), + ], + next_page_token='ghi', + ), + compute.BackendBucketList( + items=[ + compute.BackendBucket(), + compute.BackendBucket(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.BackendBucketList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.BackendBucket) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request_init["backend_bucket_resource"] = compute.BackendBucket(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request_init["backend_bucket_resource"] = compute.BackendBucket(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_bucket": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_bucket='backend_bucket_value', + backend_bucket_resource=compute.BackendBucket(bucket_name='bucket_name_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchBackendBucketRequest(), + project='project_value', + backend_bucket='backend_bucket_value', + backend_bucket_resource=compute.BackendBucket(bucket_name='bucket_name_value'), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request_init["backend_bucket_resource"] = compute.BackendBucket(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateBackendBucketRequest): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_bucket": "sample2"} + request_init["backend_bucket_resource"] = compute.BackendBucket(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_bucket": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_bucket='backend_bucket_value', + backend_bucket_resource=compute.BackendBucket(bucket_name='bucket_name_value'), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
    # (continuation of test_update_rest_flattened)
    # Exactly one HTTP request must have been issued, and its URI must match
    # the REST path template for BackendBuckets.Update.
    assert len(req.mock_calls) == 1
    _, args, _ = req.mock_calls[0]
    assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendBuckets/{backend_bucket}" % client.transport._host, args[1])


def test_update_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields must raise ValueError."""
    client = BackendBucketsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update(
            compute.UpdateBackendBucketRequest(),
            project='project_value',
            backend_bucket='backend_bucket_value',
            backend_bucket_resource=compute.BackendBucket(bucket_name='bucket_name_value'),
        )


def test_credentials_transport_error():
    """A transport instance is mutually exclusive with credentials/options."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.BackendBucketsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = BackendBucketsClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )

    # It is an error to provide a credentials file and a transport instance.
    transport = transports.BackendBucketsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = BackendBucketsClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )

    # It is an error to provide scopes and a transport instance.
    transport = transports.BackendBucketsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = BackendBucketsClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )


def test_transport_instance():
    """A pre-built transport instance is adopted verbatim by the client."""
    # A client may be instantiated with a custom transport instance.
    transport = transports.BackendBucketsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = BackendBucketsClient(transport=transport)
    assert client.transport is transport


@pytest.mark.parametrize("transport_class", [
    transports.BackendBucketsRestTransport,
])
def test_transport_adc(transport_class):
    """Constructing a transport with no credentials falls back to ADC."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, 'default') as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()


def test_backend_buckets_base_transport_error():
    """credentials and credentials_file together raise DuplicateCredentialArgs."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.BackendBucketsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json"
        )


def test_backend_buckets_base_transport():
    """Every RPC method on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch('google.cloud.compute_v1.services.backend_buckets.transports.BackendBucketsTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.BackendBucketsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        'add_signed_url_key',
        'delete',
        'delete_signed_url_key',
        'get',
        'insert',
        'list',
        'patch',
        'update',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()


def test_backend_buckets_base_transport_with_credentials_file():
    """A credentials file is loaded with the service's default OAuth scopes."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.backend_buckets.transports.BackendBucketsTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.BackendBucketsTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        # NOTE(review): the column-0 "),"  below is generator output kept as-is.
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/compute',
                'https://www.googleapis.com/auth/cloud-platform',
),
            quota_project_id="octopus",
        )


def test_backend_buckets_base_transport_with_adc():
    """With neither credentials nor a file, the base transport consults ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.backend_buckets.transports.BackendBucketsTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.BackendBucketsTransport()
        adc.assert_called_once()


def test_backend_buckets_auth_adc():
    """The client itself resolves Application Default Credentials when none are given."""
    # If no credentials are provided, we should use ADC credentials.
    # (continuation of test_backend_buckets_auth_adc)
    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        BackendBucketsClient()
        # ADC must be queried with the service's default compute scopes.
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=(
                'https://www.googleapis.com/auth/compute',
                'https://www.googleapis.com/auth/cloud-platform',
),
            quota_project_id=None,
        )


def test_backend_buckets_http_transport_client_cert_source_for_mtls():
    """The REST transport wires the client cert source into the mTLS channel."""
    cred = ga_credentials.AnonymousCredentials()
    with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
        transports.BackendBucketsRestTransport (
            credentials=cred,
            client_cert_source_for_mtls=client_cert_source_callback
        )
        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)


def test_backend_buckets_host_no_port():
    """An endpoint without a port defaults to :443."""
    client = BackendBucketsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'),
    )
    assert client.transport._host == 'compute.googleapis.com:443'


def test_backend_buckets_host_with_port():
    """An explicit port in the endpoint is preserved."""
    client = BackendBucketsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'),
    )
    assert client.transport._host == 'compute.googleapis.com:8000'


def test_common_billing_account_path():
    """common_billing_account_path renders the canonical resource string."""
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
    actual = BackendBucketsClient.common_billing_account_path(billing_account)
    assert expected == actual


def test_parse_common_billing_account_path():
    """parse_common_billing_account_path inverts common_billing_account_path."""
    expected = {
        "billing_account": "clam",
    }
    path = BackendBucketsClient.common_billing_account_path(**expected)

    # Check that the path construction is reversible.
    actual = BackendBucketsClient.parse_common_billing_account_path(path)
    assert expected == actual

def test_common_folder_path():
    """common_folder_path renders the canonical resource string."""
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder, )
    actual = BackendBucketsClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    """parse_common_folder_path inverts common_folder_path."""
    expected = {
        "folder": "octopus",
    }
    path = BackendBucketsClient.common_folder_path(**expected)

    # Check that the path construction is reversible.
    actual = BackendBucketsClient.parse_common_folder_path(path)
    assert expected == actual

def test_common_organization_path():
    """common_organization_path renders the canonical resource string."""
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization, )
    actual = BackendBucketsClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    """parse_common_organization_path inverts common_organization_path."""
    expected = {
        "organization": "nudibranch",
    }
    path = BackendBucketsClient.common_organization_path(**expected)

    # Check that the path construction is reversible.
    actual = BackendBucketsClient.parse_common_organization_path(path)
    assert expected == actual

def test_common_project_path():
    """common_project_path renders the canonical resource string."""
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project, )
    actual = BackendBucketsClient.common_project_path(project)
    assert expected == actual


def test_parse_common_project_path():
    """parse_common_project_path inverts common_project_path."""
    expected = {
        "project": "mussel",
    }
    path = BackendBucketsClient.common_project_path(**expected)

    # Check that the path construction is reversible.
+ actual = BackendBucketsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = BackendBucketsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = BackendBucketsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BackendBucketsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.BackendBucketsTransport, '_prep_wrapped_messages') as prep: + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.BackendBucketsTransport, '_prep_wrapped_messages') as prep: + transport_class = BackendBucketsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = BackendBucketsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls 
underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_backend_services.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_backend_services.py new file mode 100644 index 000000000..8be1a786f --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_backend_services.py @@ -0,0 +1,2326 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
# NOTE(review): this module appears to be GAPIC-generator output staged by
# owl-bot; prefer regenerating over hand-editing. Several imports below
# (grpc, aio, math, grpc_helpers*, pagers) are emitted by the generator
# whether or not this particular module uses them.
import os
import mock

import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule

from requests import Response
from requests import Request
from requests.sessions import Session

from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.backend_services import BackendServicesClient
from google.cloud.compute_v1.services.backend_services import pagers
from google.cloud.compute_v1.services.backend_services import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth


def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair for mTLS tests."""
    return b"cert bytes", b"key bytes"


# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a non-localhost endpoint so mTLS autoswitching can be observed."""
    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com to *.mtls.googleapis.com."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert BackendServicesClient._get_default_mtls_endpoint(None) is None
    assert BackendServicesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert BackendServicesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert BackendServicesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert BackendServicesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert BackendServicesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi


@pytest.mark.parametrize("client_class", [
    BackendServicesClient,
])
def test_backend_services_client_from_service_account_info(client_class):
    """from_service_account_info builds a client carrying the factory's credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'compute.googleapis.com:443'


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.BackendServicesRestTransport, "rest"),
])
def test_backend_services_client_service_account_always_use_jwt(transport_class, transport_name):
    """always_use_jwt_access toggles with_always_use_jwt_access on SA credentials."""
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()


@pytest.mark.parametrize("client_class", [
    BackendServicesClient,
])
def test_backend_services_client_from_service_account_file(client_class):
    """from_service_account_file/json build clients carrying the factory's credentials."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'compute.googleapis.com:443'


def test_backend_services_client_get_transport_class():
    """get_transport_class returns the REST transport (the only one available)."""
    transport = BackendServicesClient.get_transport_class()
    available_transports = [
        transports.BackendServicesRestTransport,
    ]
    assert transport in available_transports

    transport = BackendServicesClient.get_transport_class("rest")
    assert transport == transports.BackendServicesRestTransport


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (BackendServicesClient, transports.BackendServicesRestTransport, "rest"),
])
@mock.patch.object(BackendServicesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BackendServicesClient))
def test_backend_services_client_client_options(client_class, transport_class, transport_name):
    """client_options (endpoint, mTLS env vars, quota project) reach the transport."""
    # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(BackendServicesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BackendServicesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (BackendServicesClient, transports.BackendServicesRestTransport, "rest", "true"), + (BackendServicesClient, transports.BackendServicesRestTransport, "rest", "false"), +]) +@mock.patch.object(BackendServicesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BackendServicesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_backend_services_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BackendServicesClient, transports.BackendServicesRestTransport, "rest"), +]) +def test_backend_services_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BackendServicesClient, transports.BackendServicesRestTransport, "rest"), +]) +def test_backend_services_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_signed_url_key_rest(transport: str = 'rest', request_type=compute.AddSignedUrlKeyBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["signed_url_key_resource"] = compute.SignedUrlKey(key_name='key_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_signed_url_key(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_signed_url_key_rest_bad_request(transport: str = 'rest', request_type=compute.AddSignedUrlKeyBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["signed_url_key_resource"] = compute.SignedUrlKey(key_name='key_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_signed_url_key(request) + + +def test_add_signed_url_key_rest_from_dict(): + test_add_signed_url_key_rest(request_type=dict) + + +def test_add_signed_url_key_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + signed_url_key_resource=compute.SignedUrlKey(key_name='key_name_value'), + ) + mock_args.update(sample_request) + client.add_signed_url_key(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}/addSignedUrlKey" % client.transport._host, args[1]) + + +def test_add_signed_url_key_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_signed_url_key( + compute.AddSignedUrlKeyBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + signed_url_key_resource=compute.SignedUrlKey(key_name='key_name_value'), + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListBackendServicesRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendServiceAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendServiceAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListBackendServicesRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.BackendServiceAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendServiceAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/backendServices" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListBackendServicesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.BackendServiceAggregatedList( + items={ + 'a':compute.BackendServicesScopedList(), + 'b':compute.BackendServicesScopedList(), + 'c':compute.BackendServicesScopedList(), + }, + next_page_token='abc', + ), + compute.BackendServiceAggregatedList( + items={}, + next_page_token='def', + ), + compute.BackendServiceAggregatedList( + items={ + 'g':compute.BackendServicesScopedList(), + }, + next_page_token='ghi', + ), + compute.BackendServiceAggregatedList( + items={ + 'h':compute.BackendServicesScopedList(), + 'i':compute.BackendServicesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.BackendServiceAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.BackendServicesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.BackendServicesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.BackendServicesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteBackendServiceRequest): + client = 
BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + ) + + +def test_delete_signed_url_key_rest(transport: str = 'rest', request_type=compute.DeleteSignedUrlKeyBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_signed_url_key(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_signed_url_key_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteSignedUrlKeyBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_signed_url_key(request) + + +def test_delete_signed_url_key_rest_from_dict(): + test_delete_signed_url_key_rest(request_type=dict) + + +def test_delete_signed_url_key_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + key_name='key_name_value', + ) + mock_args.update(sample_request) + client.delete_signed_url_key(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}/deleteSignedUrlKey" % client.transport._host, args[1]) + + +def test_delete_signed_url_key_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_signed_url_key( + compute.DeleteSignedUrlKeyBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + key_name='key_name_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.BackendService( + affinity_cookie_ttl_sec=2432, + creation_timestamp='creation_timestamp_value', + custom_request_headers=['custom_request_headers_value'], + custom_response_headers=['custom_response_headers_value'], + description='description_value', + enable_c_d_n=True, + fingerprint='fingerprint_value', + health_checks=['health_checks_value'], + id=205, + kind='kind_value', + load_balancing_scheme='load_balancing_scheme_value', + locality_lb_policy='locality_lb_policy_value', + name='name_value', + network='network_value', + port=453, + port_name='port_name_value', + protocol='protocol_value', + region='region_value', + security_policy='security_policy_value', + self_link='self_link_value', + session_affinity='session_affinity_value', + timeout_sec=1185, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendService.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.BackendService) + assert response.affinity_cookie_ttl_sec == 2432 + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.custom_request_headers == ['custom_request_headers_value'] + assert response.custom_response_headers == ['custom_response_headers_value'] + assert response.description == 'description_value' + assert response.enable_c_d_n is True + assert response.fingerprint == 'fingerprint_value' + assert response.health_checks == ['health_checks_value'] + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.load_balancing_scheme == 'load_balancing_scheme_value' + assert response.locality_lb_policy == 'locality_lb_policy_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.port == 453 + assert response.port_name == 'port_name_value' + assert response.protocol == 'protocol_value' + assert response.region == 'region_value' + assert response.security_policy == 'security_policy_value' + assert response.self_link == 'self_link_value' + assert response.session_affinity == 'session_affinity_value' + assert response.timeout_sec == 1185 + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendService() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendService.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + ) + + +def test_get_health_rest(transport: str = 'rest', request_type=compute.GetHealthBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["resource_group_reference_resource"] = compute.ResourceGroupReference(group='group_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendServiceGroupHealth( + kind='kind_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendServiceGroupHealth.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_health(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.BackendServiceGroupHealth) + assert response.kind == 'kind_value' + + +def test_get_health_rest_bad_request(transport: str = 'rest', request_type=compute.GetHealthBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["resource_group_reference_resource"] = compute.ResourceGroupReference(group='group_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_health(request) + + +def test_get_health_rest_from_dict(): + test_get_health_rest(request_type=dict) + + +def test_get_health_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendServiceGroupHealth() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendServiceGroupHealth.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + resource_group_reference_resource=compute.ResourceGroupReference(group='group_value'), + ) + mock_args.update(sample_request) + client.get_health(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}/getHealth" % client.transport._host, args[1]) + + +def test_get_health_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_health( + compute.GetHealthBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + resource_group_reference_resource=compute.ResourceGroupReference(group='group_value'), + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertBackendServiceRequest(), + project='project_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListBackendServicesRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendServiceList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendServiceList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListBackendServicesRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.BackendServiceList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.BackendServiceList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list(
+            compute.ListBackendServicesRequest(),
+            project='project_value',
+        )
+
+
+def test_list_rest_pager():
+    # Paging test: client.list() must transparently walk next_page_token pages.
+    client = BackendServicesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # TODO(kbandes): remove this mock unless there's a good reason for it.
+        #with mock.patch.object(path_template, 'transcode') as transcode:
+        # Set the response as a series of pages: 3 + 0 + 1 + 2 items across
+        # four pages; the final page carries no next_page_token (end of list).
+        response = (
+            compute.BackendServiceList(
+                items=[
+                    compute.BackendService(),
+                    compute.BackendService(),
+                    compute.BackendService(),
+                ],
+                next_page_token='abc',
+            ),
+            compute.BackendServiceList(
+                items=[],
+                next_page_token='def',
+            ),
+            compute.BackendServiceList(
+                items=[
+                    compute.BackendService(),
+                ],
+                next_page_token='ghi',
+            ),
+            compute.BackendServiceList(
+                items=[
+                    compute.BackendService(),
+                ],
+            ),
+        )
+        # Two responses for two calls (the pager is iterated twice below).
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(compute.BackendServiceList.to_json(x) for x in response)
+        return_values = tuple(Response() for i in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode('UTF-8')
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {"project": "sample1"}
+
+        pager = client.list(request=sample_request)
+
+        # Iterating the pager yields the flattened items from every page.
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, compute.BackendService)
+                   for i in results)
+
+        # The .pages view exposes each raw page; tokens must match in order,
+        # with '' (proto default) for the final page.
+        pages = list(client.list(request=sample_request).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_patch_rest(transport: str = 'rest', request_type=compute.PatchBackendServiceRequest):
+    client = BackendServicesClient(
credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + + +def test_set_security_policy_rest(transport: str = 'rest', request_type=compute.SetSecurityPolicyBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["security_policy_reference_resource"] = compute.SecurityPolicyReference(security_policy='security_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_security_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_security_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetSecurityPolicyBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["security_policy_reference_resource"] = compute.SecurityPolicyReference(security_policy='security_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_security_policy(request) + + +def test_set_security_policy_rest_from_dict(): + test_set_security_policy_rest(request_type=dict) + + +def test_set_security_policy_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + security_policy_reference_resource=compute.SecurityPolicyReference(security_policy='security_policy_value'), + ) + mock_args.update(sample_request) + client.set_security_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}/setSecurityPolicy" % client.transport._host, args[1]) + + +def test_set_security_policy_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_security_policy( + compute.SetSecurityPolicyBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + security_policy_reference_resource=compute.SecurityPolicyReference(security_policy='security_policy_value'), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateBackendServiceRequest): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "backend_service": "sample2"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "backend_service": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + backend_service='backend_service_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/backendServices/{backend_service}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateBackendServiceRequest(), + project='project_value', + backend_service='backend_service_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BackendServicesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BackendServicesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.BackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BackendServicesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.BackendServicesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_backend_services_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.BackendServicesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_backend_services_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.backend_services.transports.BackendServicesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.BackendServicesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+    methods = (
+        'add_signed_url_key',
+        'aggregated_list',
+        'delete',
+        'delete_signed_url_key',
+        'get',
+        'get_health',
+        'insert',
+        'list',
+        'patch',
+        'set_security_policy',
+        'update',
+    )
+    # The abstract base transport implements no RPCs: every method must
+    # raise NotImplementedError until a concrete transport overrides it.
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    # close() is likewise abstract on the base transport.
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
+
+def test_backend_services_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file.
+    # _prep_wrapped_messages is patched out so no real method wrapping occurs.
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.backend_services.transports.BackendServicesTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.BackendServicesTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        # The file must be loaded with the service's default scopes and the
+        # caller-supplied quota project.
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/compute',
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id="octopus",
+        )
+
+
+def test_backend_services_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.backend_services.transports.BackendServicesTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.BackendServicesTransport()
+        adc.assert_called_once()
+
+
+def test_backend_services_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + BackendServicesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_backend_services_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.BackendServicesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_backend_services_host_no_port(): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_backend_services_host_with_port(): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = BackendServicesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = BackendServicesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BackendServicesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = BackendServicesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = BackendServicesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = BackendServicesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = BackendServicesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = BackendServicesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BackendServicesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = BackendServicesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = BackendServicesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BackendServicesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = BackendServicesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = BackendServicesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BackendServicesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.BackendServicesTransport, '_prep_wrapped_messages') as prep: + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.BackendServicesTransport, '_prep_wrapped_messages') as prep: + transport_class = BackendServicesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = BackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client 
calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_disk_types.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_disk_types.py new file mode 100644 index 000000000..207eecac9 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_disk_types.py @@ -0,0 +1,1126 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.disk_types import DiskTypesClient +from google.cloud.compute_v1.services.disk_types import pagers +from google.cloud.compute_v1.services.disk_types import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DiskTypesClient._get_default_mtls_endpoint(None) is None + assert DiskTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert DiskTypesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert DiskTypesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert DiskTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert DiskTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + DiskTypesClient, +]) +def test_disk_types_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.DiskTypesRestTransport, "rest"), +]) +def test_disk_types_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + DiskTypesClient, +]) +def test_disk_types_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_disk_types_client_get_transport_class(): + transport = DiskTypesClient.get_transport_class() + available_transports = [ + transports.DiskTypesRestTransport, + ] + assert transport in available_transports + + transport = DiskTypesClient.get_transport_class("rest") + assert transport == transports.DiskTypesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DiskTypesClient, transports.DiskTypesRestTransport, "rest"), +]) +@mock.patch.object(DiskTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DiskTypesClient)) +def test_disk_types_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(DiskTypesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(DiskTypesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (DiskTypesClient, transports.DiskTypesRestTransport, "rest", "true"), + (DiskTypesClient, transports.DiskTypesRestTransport, "rest", "false"), +]) +@mock.patch.object(DiskTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DiskTypesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_disk_types_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DiskTypesClient, transports.DiskTypesRestTransport, "rest"), +]) +def test_disk_types_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DiskTypesClient, transports.DiskTypesRestTransport, "rest"), +]) +def test_disk_types_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListDiskTypesRequest): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DiskTypeAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskTypeAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListDiskTypesRequest): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DiskTypeAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskTypeAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/diskTypes" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListDiskTypesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.DiskTypeAggregatedList( + items={ + 'a':compute.DiskTypesScopedList(), + 'b':compute.DiskTypesScopedList(), + 'c':compute.DiskTypesScopedList(), + }, + next_page_token='abc', + ), + compute.DiskTypeAggregatedList( + items={}, + next_page_token='def', + ), + compute.DiskTypeAggregatedList( + items={ + 'g':compute.DiskTypesScopedList(), + }, + next_page_token='ghi', + ), + compute.DiskTypeAggregatedList( + items={ + 'h':compute.DiskTypesScopedList(), + 'i':compute.DiskTypesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.DiskTypeAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.DiskTypesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.DiskTypesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.DiskTypesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetDiskTypeRequest): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # 
send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DiskType( + creation_timestamp='creation_timestamp_value', + default_disk_size_gb=2097, + description='description_value', + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + valid_disk_size='valid_disk_size_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskType.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.DiskType) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.default_disk_size_gb == 2097 + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.valid_disk_size == 'valid_disk_size_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetDiskTypeRequest): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DiskType() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskType.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "disk_type": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk_type='disk_type_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/diskTypes/{disk_type}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetDiskTypeRequest(), + project='project_value', + zone='zone_value', + disk_type='disk_type_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListDiskTypesRequest): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DiskTypeList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskTypeList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListDiskTypesRequest): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DiskTypeList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskTypeList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/diskTypes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListDiskTypesRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.DiskTypeList( + items=[ + compute.DiskType(), + compute.DiskType(), + compute.DiskType(), + ], + next_page_token='abc', + ), + compute.DiskTypeList( + items=[], + next_page_token='def', + ), + compute.DiskTypeList( + items=[ + compute.DiskType(), + ], + next_page_token='ghi', + ), + compute.DiskTypeList( + items=[ + compute.DiskType(), + compute.DiskType(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.DiskTypeList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.DiskType) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.DiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.DiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DiskTypesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DiskTypesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DiskTypesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.DiskTypesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_disk_types_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DiskTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_disk_types_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.disk_types.transports.DiskTypesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.DiskTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_disk_types_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.disk_types.transports.DiskTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DiskTypesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_disk_types_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.disk_types.transports.DiskTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DiskTypesTransport() + adc.assert_called_once() + + +def test_disk_types_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DiskTypesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_disk_types_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.DiskTypesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_disk_types_host_no_port(): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_disk_types_host_with_port(): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = DiskTypesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = DiskTypesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DiskTypesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = DiskTypesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = DiskTypesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DiskTypesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = DiskTypesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = DiskTypesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = DiskTypesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = DiskTypesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = DiskTypesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DiskTypesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = DiskTypesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = DiskTypesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = DiskTypesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.DiskTypesTransport, '_prep_wrapped_messages') as prep: + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.DiskTypesTransport, '_prep_wrapped_messages') as prep: + transport_class = DiskTypesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = DiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_disks.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_disks.py new file mode 100644 index 000000000..271495f68 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_disks.py @@ -0,0 +1,2598 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.disks import DisksClient +from google.cloud.compute_v1.services.disks import pagers +from google.cloud.compute_v1.services.disks import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert DisksClient._get_default_mtls_endpoint(None) is None + assert DisksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert DisksClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert DisksClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert DisksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert DisksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + DisksClient, +]) +def test_disks_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.DisksRestTransport, "rest"), +]) +def test_disks_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + DisksClient, +]) +def test_disks_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_disks_client_get_transport_class(): + transport = DisksClient.get_transport_class() + available_transports = [ + transports.DisksRestTransport, + ] + assert transport in available_transports + + transport = DisksClient.get_transport_class("rest") + assert transport == transports.DisksRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DisksClient, transports.DisksRestTransport, "rest"), +]) +@mock.patch.object(DisksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DisksClient)) +def test_disks_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(DisksClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(DisksClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (DisksClient, transports.DisksRestTransport, "rest", "true"), + (DisksClient, transports.DisksRestTransport, "rest", "false"), +]) +@mock.patch.object(DisksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DisksClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_disks_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DisksClient, transports.DisksRestTransport, "rest"), +]) +def test_disks_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (DisksClient, transports.DisksRestTransport, "rest"), +]) +def test_disks_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_resource_policies_rest(transport: str = 'rest', request_type=compute.AddResourcePoliciesDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["disks_add_resource_policies_request_resource"] = compute.DisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_resource_policies(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_resource_policies_rest_bad_request(transport: str = 'rest', request_type=compute.AddResourcePoliciesDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["disks_add_resource_policies_request_resource"] = compute.DisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_resource_policies(request) + + +def test_add_resource_policies_rest_from_dict(): + test_add_resource_policies_rest(request_type=dict) + + +def test_add_resource_policies_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk='disk_value', + disks_add_resource_policies_request_resource=compute.DisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + mock_args.update(sample_request) + client.add_resource_policies(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/addResourcePolicies" % client.transport._host, args[1]) + + +def test_add_resource_policies_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_resource_policies( + compute.AddResourcePoliciesDiskRequest(), + project='project_value', + zone='zone_value', + disk='disk_value', + disks_add_resource_policies_request_resource=compute.DisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListDisksRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DiskAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListDisksRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DiskAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/disks" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListDisksRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.DiskAggregatedList( + items={ + 'a':compute.DisksScopedList(), + 'b':compute.DisksScopedList(), + 'c':compute.DisksScopedList(), + }, + next_page_token='abc', + ), + compute.DiskAggregatedList( + items={}, + next_page_token='def', + ), + compute.DiskAggregatedList( + items={ + 'g':compute.DisksScopedList(), + }, + next_page_token='ghi', + ), + compute.DiskAggregatedList( + items={ + 'h':compute.DisksScopedList(), + 'i':compute.DisksScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.DiskAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.DisksScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.DisksScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.DisksScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_create_snapshot_rest(transport: str = 'rest', request_type=compute.CreateSnapshotDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy 
transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["snapshot_resource"] = compute.Snapshot(auto_created=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_snapshot(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_create_snapshot_rest_bad_request(transport: str = 'rest', request_type=compute.CreateSnapshotDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["snapshot_resource"] = compute.Snapshot(auto_created=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_snapshot(request) + + +def test_create_snapshot_rest_from_dict(): + test_create_snapshot_rest(request_type=dict) + + +def test_create_snapshot_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk='disk_value', + snapshot_resource=compute.Snapshot(auto_created=True), + ) + mock_args.update(sample_request) + client.create_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/createSnapshot" % client.transport._host, args[1]) + + +def test_create_snapshot_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_snapshot( + compute.CreateSnapshotDiskRequest(), + project='project_value', + zone='zone_value', + disk='disk_value', + snapshot_resource=compute.Snapshot(auto_created=True), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk='disk_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{disk}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteDiskRequest(), + project='project_value', + zone='zone_value', + disk='disk_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Disk( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + last_attach_timestamp='last_attach_timestamp_value', + last_detach_timestamp='last_detach_timestamp_value', + license_codes=[1360], + licenses=['licenses_value'], + location_hint='location_hint_value', + name='name_value', + options='options_value', + physical_block_size_bytes=2663, + provisioned_iops=1740, + region='region_value', + replica_zones=['replica_zones_value'], + resource_policies=['resource_policies_value'], + satisfies_pzs=True, + self_link='self_link_value', + size_gb=739, + source_disk='source_disk_value', + source_disk_id='source_disk_id_value', + source_image='source_image_value', + source_image_id='source_image_id_value', + source_snapshot='source_snapshot_value', + source_snapshot_id='source_snapshot_id_value', + source_storage_object='source_storage_object_value', + status='status_value', + type_='type__value', + users=['users_value'], + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Disk.to_json(return_value) + response_value._content = 
json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Disk) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.last_attach_timestamp == 'last_attach_timestamp_value' + assert response.last_detach_timestamp == 'last_detach_timestamp_value' + assert response.license_codes == [1360] + assert response.licenses == ['licenses_value'] + assert response.location_hint == 'location_hint_value' + assert response.name == 'name_value' + assert response.options == 'options_value' + assert response.physical_block_size_bytes == 2663 + assert response.provisioned_iops == 1740 + assert response.region == 'region_value' + assert response.replica_zones == ['replica_zones_value'] + assert response.resource_policies == ['resource_policies_value'] + assert response.satisfies_pzs is True + assert response.self_link == 'self_link_value' + assert response.size_gb == 739 + assert response.source_disk == 'source_disk_value' + assert response.source_disk_id == 'source_disk_id_value' + assert response.source_image == 'source_image_value' + assert response.source_image_id == 'source_image_id_value' + assert response.source_snapshot == 'source_snapshot_value' + assert response.source_snapshot_id == 'source_snapshot_id_value' + assert response.source_storage_object == 'source_storage_object_value' + assert response.status == 'status_value' + assert response.type_ == 'type__value' + assert response.users == ['users_value'] + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Disk() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Disk.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk='disk_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{disk}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetDiskRequest(), + project='project_value', + zone='zone_value', + disk='disk_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyDiskRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["disk_resource"] = compute.Disk(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["disk_resource"] = compute.Disk(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk_resource=compute.Disk(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertDiskRequest(), + project='project_value', + zone='zone_value', + disk_resource=compute.Disk(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListDisksRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DiskList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListDisksRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DiskList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListDisksRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.DiskList( + items=[ + compute.Disk(), + compute.Disk(), + compute.Disk(), + ], + next_page_token='abc', + ), + compute.DiskList( + items=[], + next_page_token='def', + ), + compute.DiskList( + items=[ + compute.Disk(), + ], + next_page_token='ghi', + ), + compute.DiskList( + items=[ + compute.Disk(), + compute.Disk(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.DiskList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Disk) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_remove_resource_policies_rest(transport: str = 'rest', request_type=compute.RemoveResourcePoliciesDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy 
transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["disks_remove_resource_policies_request_resource"] = compute.DisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_resource_policies(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_resource_policies_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveResourcePoliciesDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["disks_remove_resource_policies_request_resource"] = compute.DisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_resource_policies(request) + + +def test_remove_resource_policies_rest_from_dict(): + test_remove_resource_policies_rest(request_type=dict) + + +def test_remove_resource_policies_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk='disk_value', + disks_remove_resource_policies_request_resource=compute.DisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + mock_args.update(sample_request) + client.remove_resource_policies(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/removeResourcePolicies" % client.transport._host, args[1]) + + +def test_remove_resource_policies_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_resource_policies( + compute.RemoveResourcePoliciesDiskRequest(), + project='project_value', + zone='zone_value', + disk='disk_value', + disks_remove_resource_policies_request_resource=compute.DisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + + +def test_resize_rest(transport: str = 'rest', request_type=compute.ResizeDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["disks_resize_request_resource"] = compute.DisksResizeRequest(size_gb=739) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.resize(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_resize_rest_bad_request(transport: str = 'rest', request_type=compute.ResizeDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + request_init["disks_resize_request_resource"] = compute.DisksResizeRequest(size_gb=739) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.resize(request) + + +def test_resize_rest_from_dict(): + test_resize_rest(request_type=dict) + + +def test_resize_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + disk='disk_value', + disks_resize_request_resource=compute.DisksResizeRequest(size_gb=739), + ) + mock_args.update(sample_request) + client.resize(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{disk}/resize" % client.transport._host, args[1]) + + +def test_resize_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resize( + compute.ResizeDiskRequest(), + project='project_value', + zone='zone_value', + disk='disk_value', + disks_resize_request_resource=compute.DisksResizeRequest(size_gb=739), + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyDiskRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_labels_request_resource"] = compute.ZoneSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = 
response_value + response = client.set_labels(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_labels_request_resource"] = compute.ZoneSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_labels_request_resource=compute.ZoneSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsDiskRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_labels_request_resource=compute.ZoneSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsDiskRequest): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/disks/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsDiskRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.DisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.DisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DisksClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.DisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = DisksClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.DisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = DisksClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.DisksRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_disks_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.DisksTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_disks_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.disks.transports.DisksTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.DisksTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'add_resource_policies', + 'aggregated_list', + 'create_snapshot', + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'remove_resource_policies', + 'resize', + 'set_iam_policy', + 'set_labels', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_disks_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.disks.transports.DisksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DisksTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_disks_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.disks.transports.DisksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.DisksTransport() + adc.assert_called_once() + + +def test_disks_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + DisksClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_disks_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.DisksRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_disks_host_no_port(): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_disks_host_with_port(): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = 
DisksClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = DisksClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = DisksClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = DisksClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = DisksClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = DisksClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = DisksClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = DisksClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = DisksClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = DisksClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = DisksClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = DisksClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = DisksClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = DisksClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = DisksClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.DisksTransport, '_prep_wrapped_messages') as prep: + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.DisksTransport, '_prep_wrapped_messages') as prep: + transport_class = DisksClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = DisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_external_vpn_gateways.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_external_vpn_gateways.py new file mode 100644 index 000000000..aa2017456 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_external_vpn_gateways.py @@ -0,0 +1,1501 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.external_vpn_gateways import ExternalVpnGatewaysClient +from google.cloud.compute_v1.services.external_vpn_gateways import pagers +from google.cloud.compute_v1.services.external_vpn_gateways import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ExternalVpnGatewaysClient._get_default_mtls_endpoint(None) is None + assert ExternalVpnGatewaysClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ExternalVpnGatewaysClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ExternalVpnGatewaysClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ExternalVpnGatewaysClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ExternalVpnGatewaysClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ExternalVpnGatewaysClient, +]) +def test_external_vpn_gateways_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ExternalVpnGatewaysRestTransport, "rest"), +]) +def test_external_vpn_gateways_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport 
= transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ExternalVpnGatewaysClient, +]) +def test_external_vpn_gateways_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_external_vpn_gateways_client_get_transport_class(): + transport = ExternalVpnGatewaysClient.get_transport_class() + available_transports = [ + transports.ExternalVpnGatewaysRestTransport, + ] + assert transport in available_transports + + transport = ExternalVpnGatewaysClient.get_transport_class("rest") + assert transport == transports.ExternalVpnGatewaysRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport, "rest"), +]) +@mock.patch.object(ExternalVpnGatewaysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ExternalVpnGatewaysClient)) +def test_external_vpn_gateways_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ExternalVpnGatewaysClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ExternalVpnGatewaysClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport, "rest", "true"), + (ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport, "rest", "false"), +]) +@mock.patch.object(ExternalVpnGatewaysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ExternalVpnGatewaysClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_external_vpn_gateways_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport, "rest"), +]) +def test_external_vpn_gateways_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ExternalVpnGatewaysClient, transports.ExternalVpnGatewaysRestTransport, "rest"), +]) +def test_external_vpn_gateways_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "external_vpn_gateway": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "external_vpn_gateway": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "external_vpn_gateway": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + external_vpn_gateway='external_vpn_gateway_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/externalVpnGateways/{external_vpn_gateway}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteExternalVpnGatewayRequest(), + project='project_value', + external_vpn_gateway='external_vpn_gateway_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "external_vpn_gateway": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ExternalVpnGateway( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + name='name_value', + redundancy_type='redundancy_type_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ExternalVpnGateway.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.ExternalVpnGateway) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.name == 'name_value' + assert response.redundancy_type == 'redundancy_type_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "external_vpn_gateway": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ExternalVpnGateway() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ExternalVpnGateway.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "external_vpn_gateway": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + external_vpn_gateway='external_vpn_gateway_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/externalVpnGateways/{external_vpn_gateway}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetExternalVpnGatewayRequest(), + project='project_value', + external_vpn_gateway='external_vpn_gateway_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["external_vpn_gateway_resource"] = compute.ExternalVpnGateway(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["external_vpn_gateway_resource"] = compute.ExternalVpnGateway(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + external_vpn_gateway_resource=compute.ExternalVpnGateway(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/externalVpnGateways" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertExternalVpnGatewayRequest(), + project='project_value', + external_vpn_gateway_resource=compute.ExternalVpnGateway(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListExternalVpnGatewaysRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ExternalVpnGatewayList( + etag='etag_value', + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ExternalVpnGatewayList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.etag == 'etag_value' + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListExternalVpnGatewaysRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ExternalVpnGatewayList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ExternalVpnGatewayList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/externalVpnGateways" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListExternalVpnGatewaysRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ExternalVpnGatewayList( + items=[ + compute.ExternalVpnGateway(), + compute.ExternalVpnGateway(), + compute.ExternalVpnGateway(), + ], + next_page_token='abc', + ), + compute.ExternalVpnGatewayList( + items=[], + next_page_token='def', + ), + compute.ExternalVpnGatewayList( + items=[ + compute.ExternalVpnGateway(), + ], + next_page_token='ghi', + ), + compute.ExternalVpnGatewayList( + items=[ + compute.ExternalVpnGateway(), + compute.ExternalVpnGateway(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ExternalVpnGatewayList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.ExternalVpnGateway) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and 
fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_labels(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/externalVpnGateways/{resource}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsExternalVpnGatewayRequest(), + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsExternalVpnGatewayRequest): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/externalVpnGateways/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsExternalVpnGatewayRequest(), + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.ExternalVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ExternalVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExternalVpnGatewaysClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ExternalVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ExternalVpnGatewaysClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ExternalVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ExternalVpnGatewaysClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ExternalVpnGatewaysRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_external_vpn_gateways_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ExternalVpnGatewaysTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_external_vpn_gateways_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.external_vpn_gateways.transports.ExternalVpnGatewaysTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ExternalVpnGatewaysTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'set_labels', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_external_vpn_gateways_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.external_vpn_gateways.transports.ExternalVpnGatewaysTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ExternalVpnGatewaysTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 
'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_external_vpn_gateways_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.external_vpn_gateways.transports.ExternalVpnGatewaysTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ExternalVpnGatewaysTransport() + adc.assert_called_once() + + +def test_external_vpn_gateways_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ExternalVpnGatewaysClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_external_vpn_gateways_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ExternalVpnGatewaysRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_external_vpn_gateways_host_no_port(): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_external_vpn_gateways_host_with_port(): + client = 
ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ExternalVpnGatewaysClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ExternalVpnGatewaysClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ExternalVpnGatewaysClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ExternalVpnGatewaysClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ExternalVpnGatewaysClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ExternalVpnGatewaysClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ExternalVpnGatewaysClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ExternalVpnGatewaysClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExternalVpnGatewaysClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ExternalVpnGatewaysClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ExternalVpnGatewaysClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ExternalVpnGatewaysClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ExternalVpnGatewaysClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ExternalVpnGatewaysClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ExternalVpnGatewaysClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ExternalVpnGatewaysTransport, '_prep_wrapped_messages') as prep: + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ExternalVpnGatewaysTransport, '_prep_wrapped_messages') as prep: + transport_class = ExternalVpnGatewaysClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ExternalVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_firewall_policies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_firewall_policies.py new file mode 100644 index 000000000..3d9745f31 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_firewall_policies.py @@ -0,0 +1,3013 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.firewall_policies import FirewallPoliciesClient +from google.cloud.compute_v1.services.firewall_policies import pagers +from google.cloud.compute_v1.services.firewall_policies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FirewallPoliciesClient._get_default_mtls_endpoint(None) is None + assert FirewallPoliciesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert FirewallPoliciesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert FirewallPoliciesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert FirewallPoliciesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert FirewallPoliciesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + FirewallPoliciesClient, +]) +def test_firewall_policies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.FirewallPoliciesRestTransport, "rest"), +]) +def test_firewall_policies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + FirewallPoliciesClient, +]) +def test_firewall_policies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_firewall_policies_client_get_transport_class(): + transport = FirewallPoliciesClient.get_transport_class() + available_transports = [ + transports.FirewallPoliciesRestTransport, + ] + assert transport in available_transports + + transport = FirewallPoliciesClient.get_transport_class("rest") + assert transport == transports.FirewallPoliciesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FirewallPoliciesClient, transports.FirewallPoliciesRestTransport, "rest"), +]) +@mock.patch.object(FirewallPoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallPoliciesClient)) +def test_firewall_policies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(FirewallPoliciesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(FirewallPoliciesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (FirewallPoliciesClient, transports.FirewallPoliciesRestTransport, "rest", "true"), + (FirewallPoliciesClient, transports.FirewallPoliciesRestTransport, "rest", "false"), +]) +@mock.patch.object(FirewallPoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallPoliciesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_firewall_policies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FirewallPoliciesClient, transports.FirewallPoliciesRestTransport, "rest"), +]) +def test_firewall_policies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FirewallPoliciesClient, transports.FirewallPoliciesRestTransport, "rest"), +]) +def test_firewall_policies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_association_rest(transport: str = 'rest', request_type=compute.AddAssociationFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_association_resource"] = compute.FirewallPolicyAssociation(attachment_target='attachment_target_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_association(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_association_rest_bad_request(transport: str = 'rest', request_type=compute.AddAssociationFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_association_resource"] = compute.FirewallPolicyAssociation(attachment_target='attachment_target_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_association(request) + + +def test_add_association_rest_from_dict(): + test_add_association_rest(request_type=dict) + + +def test_add_association_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + firewall_policy_association_resource=compute.FirewallPolicyAssociation(attachment_target='attachment_target_value'), + ) + mock_args.update(sample_request) + client.add_association(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/addAssociation" % client.transport._host, args[1]) + + +def test_add_association_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_association( + compute.AddAssociationFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + firewall_policy_association_resource=compute.FirewallPolicyAssociation(attachment_target='attachment_target_value'), + ) + + +def test_add_rule_rest(transport: str = 'rest', request_type=compute.AddRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_rule_resource"] = compute.FirewallPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_rule(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_rule_rest_bad_request(transport: str = 'rest', request_type=compute.AddRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_rule_resource"] = compute.FirewallPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_rule(request) + + +def test_add_rule_rest_from_dict(): + test_add_rule_rest(request_type=dict) + + +def test_add_rule_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + firewall_policy_rule_resource=compute.FirewallPolicyRule(action='action_value'), + ) + mock_args.update(sample_request) + client.add_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/addRule" % client.transport._host, args[1]) + + +def test_add_rule_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_rule( + compute.AddRuleFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + firewall_policy_rule_resource=compute.FirewallPolicyRule(action='action_value'), + ) + + +def test_clone_rules_rest(transport: str = 'rest', request_type=compute.CloneRulesFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.clone_rules(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_clone_rules_rest_bad_request(transport: str = 'rest', request_type=compute.CloneRulesFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.clone_rules(request) + + +def test_clone_rules_rest_from_dict(): + test_clone_rules_rest(request_type=dict) + + +def test_clone_rules_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + ) + mock_args.update(sample_request) + client.clone_rules(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/cloneRules" % client.transport._host, args[1]) + + +def test_clone_rules_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.clone_rules( + compute.CloneRulesFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.FirewallPolicy( + creation_timestamp='creation_timestamp_value', + description='description_value', + display_name='display_name_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + parent='parent_value', + rule_tuple_count=1737, + self_link='self_link_value', + self_link_with_id='self_link_with_id_value', + short_name='short_name_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPolicy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.FirewallPolicy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.display_name == 'display_name_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.parent == 'parent_value' + assert response.rule_tuple_count == 1737 + assert response.self_link == 'self_link_value' + assert response.self_link_with_id == 'self_link_with_id_value' + assert response.short_name == 'short_name_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.FirewallPolicy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPolicy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + ) + + +def test_get_association_rest(transport: str = 'rest', request_type=compute.GetAssociationFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.FirewallPolicyAssociation( + attachment_target='attachment_target_value', + display_name='display_name_value', + firewall_policy_id='firewall_policy_id_value', + name='name_value', + short_name='short_name_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPolicyAssociation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_association(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.FirewallPolicyAssociation) + assert response.attachment_target == 'attachment_target_value' + assert response.display_name == 'display_name_value' + assert response.firewall_policy_id == 'firewall_policy_id_value' + assert response.name == 'name_value' + assert response.short_name == 'short_name_value' + + +def test_get_association_rest_bad_request(transport: str = 'rest', request_type=compute.GetAssociationFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_association(request) + + +def test_get_association_rest_from_dict(): + test_get_association_rest(request_type=dict) + + +def test_get_association_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.FirewallPolicyAssociation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPolicyAssociation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + ) + mock_args.update(sample_request) + client.get_association(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/getAssociation" % client.transport._host, args[1]) + + +def test_get_association_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_association( + compute.GetAssociationFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyFirewallPolicyRequest(), + resource='resource_value', + ) + + +def test_get_rule_rest(transport: str = 'rest', request_type=compute.GetRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.FirewallPolicyRule( + action='action_value', + description='description_value', + direction='direction_value', + disabled=True, + enable_logging=True, + kind='kind_value', + priority=898, + rule_tuple_count=1737, + target_resources=['target_resources_value'], + target_service_accounts=['target_service_accounts_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPolicyRule.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_rule(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.FirewallPolicyRule) + assert response.action == 'action_value' + assert response.description == 'description_value' + assert response.direction == 'direction_value' + assert response.disabled is True + assert response.enable_logging is True + assert response.kind == 'kind_value' + assert response.priority == 898 + assert response.rule_tuple_count == 1737 + assert response.target_resources == ['target_resources_value'] + assert response.target_service_accounts == ['target_service_accounts_value'] + + +def test_get_rule_rest_bad_request(transport: str = 'rest', request_type=compute.GetRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_rule(request) + + +def test_get_rule_rest_from_dict(): + test_get_rule_rest(request_type=dict) + + +def test_get_rule_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.FirewallPolicyRule() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPolicyRule.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + ) + mock_args.update(sample_request) + client.get_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/getRule" % client.transport._host, args[1]) + + +def test_get_rule_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_rule( + compute.GetRuleFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request_init["firewall_policy_resource"] = compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request_init["firewall_policy_resource"] = compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + parent_id='parent_id_value', + firewall_policy_resource=compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertFirewallPolicyRequest(), + parent_id='parent_id_value', + firewall_policy_resource=compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListFirewallPoliciesRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.FirewallPolicyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPolicyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListFirewallPoliciesRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_pager(): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.FirewallPolicyList( + items=[ + compute.FirewallPolicy(), + compute.FirewallPolicy(), + compute.FirewallPolicy(), + ], + next_page_token='abc', + ), + compute.FirewallPolicyList( + items=[], + next_page_token='def', + ), + compute.FirewallPolicyList( + items=[ + compute.FirewallPolicy(), + ], + next_page_token='ghi', + ), + compute.FirewallPolicyList( + items=[ + compute.FirewallPolicy(), + compute.FirewallPolicy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.FirewallPolicyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.FirewallPolicy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_associations_rest(transport: str = 'rest', request_type=compute.ListAssociationsFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.FirewallPoliciesListAssociationsResponse( + kind='kind_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallPoliciesListAssociationsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_associations(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.FirewallPoliciesListAssociationsResponse) + assert response.kind == 'kind_value' + + +def test_list_associations_rest_bad_request(transport: str = 'rest', request_type=compute.ListAssociationsFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_associations(request) + + +def test_list_associations_rest_from_dict(): + test_list_associations_rest(request_type=dict) + + +def test_move_rest(transport: str = 'rest', request_type=compute.MoveFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.move(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_move_rest_bad_request(transport: str = 'rest', request_type=compute.MoveFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.move(request) + + +def test_move_rest_from_dict(): + test_move_rest(request_type=dict) + + +def test_move_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + parent_id='parent_id_value', + ) + mock_args.update(sample_request) + client.move(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/move" % client.transport._host, args[1]) + + +def test_move_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.move( + compute.MoveFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + parent_id='parent_id_value', + ) + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_resource"] = compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_resource"] = compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + firewall_policy_resource=compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + firewall_policy_resource=compute.FirewallPolicy(associations=[compute.FirewallPolicyAssociation(attachment_target='attachment_target_value')]), + ) + + +def test_patch_rule_rest(transport: str = 'rest', request_type=compute.PatchRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_rule_resource"] = compute.FirewallPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch_rule(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rule_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request_init["firewall_policy_rule_resource"] = compute.FirewallPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch_rule(request) + + +def test_patch_rule_rest_from_dict(): + test_patch_rule_rest(request_type=dict) + + +def test_patch_rule_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + firewall_policy_rule_resource=compute.FirewallPolicyRule(action='action_value'), + ) + mock_args.update(sample_request) + client.patch_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/patchRule" % client.transport._host, args[1]) + + +def test_patch_rule_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch_rule( + compute.PatchRuleFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + firewall_policy_rule_resource=compute.FirewallPolicyRule(action='action_value'), + ) + + +def test_remove_association_rest(transport: str = 'rest', request_type=compute.RemoveAssociationFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_association(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_association_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveAssociationFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_association(request) + + +def test_remove_association_rest_from_dict(): + test_remove_association_rest(request_type=dict) + + +def test_remove_association_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + ) + mock_args.update(sample_request) + client.remove_association(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/removeAssociation" % client.transport._host, args[1]) + + +def test_remove_association_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_association( + compute.RemoveAssociationFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + ) + + +def test_remove_rule_rest(transport: str = 'rest', request_type=compute.RemoveRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_rule(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_rule_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveRuleFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"firewall_policy": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_rule(request) + + +def test_remove_rule_rest_from_dict(): + test_remove_rule_rest(request_type=dict) + + +def test_remove_rule_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"firewall_policy": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + firewall_policy='firewall_policy_value', + ) + mock_args.update(sample_request) + client.remove_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{firewall_policy}/removeRule" % client.transport._host, args[1]) + + +def test_remove_rule_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.remove_rule( + compute.RemoveRuleFirewallPolicyRequest(), + firewall_policy='firewall_policy_value', + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "sample1"} + request_init["global_organization_set_policy_request_resource"] = compute.GlobalOrganizationSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "sample1"} + request_init["global_organization_set_policy_request_resource"] = compute.GlobalOrganizationSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + global_organization_set_policy_request_resource=compute.GlobalOrganizationSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyFirewallPolicyRequest(), + resource='resource_value', + global_organization_set_policy_request_resource=compute.GlobalOrganizationSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "sample1"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsFirewallPolicyRequest): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"resource": "sample1"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"resource": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/firewallPolicies/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsFirewallPolicyRequest(), + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.FirewallPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FirewallPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FirewallPoliciesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FirewallPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FirewallPoliciesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.FirewallPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FirewallPoliciesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.FirewallPoliciesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_firewall_policies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FirewallPoliciesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_firewall_policies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.firewall_policies.transports.FirewallPoliciesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.FirewallPoliciesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'add_association', + 'add_rule', + 'clone_rules', + 'delete', + 'get', + 'get_association', + 'get_iam_policy', + 'get_rule', + 'insert', + 'list', + 'list_associations', + 'move', + 'patch', + 'patch_rule', + 'remove_association', + 'remove_rule', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_firewall_policies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.firewall_policies.transports.FirewallPoliciesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FirewallPoliciesTransport( + 
credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_firewall_policies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.firewall_policies.transports.FirewallPoliciesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FirewallPoliciesTransport() + adc.assert_called_once() + + +def test_firewall_policies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FirewallPoliciesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_firewall_policies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.FirewallPoliciesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_firewall_policies_host_no_port(): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + 
assert client.transport._host == 'compute.googleapis.com:443' + + +def test_firewall_policies_host_with_port(): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FirewallPoliciesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = FirewallPoliciesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = FirewallPoliciesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = FirewallPoliciesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = FirewallPoliciesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FirewallPoliciesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FirewallPoliciesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = FirewallPoliciesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FirewallPoliciesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = FirewallPoliciesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = FirewallPoliciesClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = FirewallPoliciesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FirewallPoliciesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = FirewallPoliciesClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FirewallPoliciesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FirewallPoliciesTransport, '_prep_wrapped_messages') as prep: + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FirewallPoliciesTransport, '_prep_wrapped_messages') as prep: + transport_class = FirewallPoliciesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = FirewallPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_firewalls.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_firewalls.py new file mode 100644 index 000000000..4ca7879f0 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_firewalls.py @@ -0,0 +1,1557 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.firewalls import FirewallsClient +from google.cloud.compute_v1.services.firewalls import pagers +from google.cloud.compute_v1.services.firewalls import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert FirewallsClient._get_default_mtls_endpoint(None) is None + assert FirewallsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert FirewallsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert FirewallsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert FirewallsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert FirewallsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + FirewallsClient, +]) +def test_firewalls_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.FirewallsRestTransport, "rest"), +]) +def test_firewalls_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) 
+ + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + FirewallsClient, +]) +def test_firewalls_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_firewalls_client_get_transport_class(): + transport = FirewallsClient.get_transport_class() + available_transports = [ + transports.FirewallsRestTransport, + ] + assert transport in available_transports + + transport = FirewallsClient.get_transport_class("rest") + assert transport == transports.FirewallsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FirewallsClient, transports.FirewallsRestTransport, "rest"), +]) +@mock.patch.object(FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)) +def test_firewalls_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(FirewallsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(FirewallsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (FirewallsClient, transports.FirewallsRestTransport, "rest", "true"), + (FirewallsClient, transports.FirewallsRestTransport, "rest", "false"), +]) +@mock.patch.object(FirewallsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(FirewallsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_firewalls_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FirewallsClient, transports.FirewallsRestTransport, "rest"), +]) +def test_firewalls_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (FirewallsClient, transports.FirewallsRestTransport, "rest"), +]) +def test_firewalls_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "firewall": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "firewall": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "firewall": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + firewall='firewall_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteFirewallRequest(), + project='project_value', + firewall='firewall_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "firewall": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Firewall( + creation_timestamp='creation_timestamp_value', + description='description_value', + destination_ranges=['destination_ranges_value'], + direction='direction_value', + disabled=True, + id=205, + kind='kind_value', + name='name_value', + network='network_value', + priority=898, + self_link='self_link_value', + source_ranges=['source_ranges_value'], + source_service_accounts=['source_service_accounts_value'], + source_tags=['source_tags_value'], + target_service_accounts=['target_service_accounts_value'], + target_tags=['target_tags_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Firewall.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Firewall) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.destination_ranges == ['destination_ranges_value'] + assert response.direction == 'direction_value' + assert response.disabled is True + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.priority == 898 + assert response.self_link == 'self_link_value' + assert response.source_ranges == ['source_ranges_value'] + assert response.source_service_accounts == ['source_service_accounts_value'] + assert response.source_tags == ['source_tags_value'] + assert response.target_service_accounts == ['target_service_accounts_value'] + assert response.target_tags == ['target_tags_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "firewall": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Firewall() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Firewall.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "firewall": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + firewall='firewall_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetFirewallRequest(), + project='project_value', + firewall='firewall_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["firewall_resource"] = compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["firewall_resource"] = compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + firewall_resource=compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/firewalls" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertFirewallRequest(), + project='project_value', + firewall_resource=compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListFirewallsRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.FirewallList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListFirewallsRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.FirewallList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.FirewallList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/firewalls" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListFirewallsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.FirewallList( + items=[ + compute.Firewall(), + compute.Firewall(), + compute.Firewall(), + ], + next_page_token='abc', + ), + compute.FirewallList( + items=[], + next_page_token='def', + ), + compute.FirewallList( + items=[ + compute.Firewall(), + ], + next_page_token='ghi', + ), + compute.FirewallList( + items=[ + compute.Firewall(), + compute.Firewall(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.FirewallList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Firewall) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + 
request_init = {"project": "sample1", "firewall": "sample2"} + request_init["firewall_resource"] = compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "firewall": "sample2"} + request_init["firewall_resource"] = compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "firewall": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + firewall='firewall_value', + firewall_resource=compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchFirewallRequest(), + project='project_value', + firewall='firewall_value', + firewall_resource=compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "firewall": "sample2"} + request_init["firewall_resource"] = compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateFirewallRequest): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "firewall": "sample2"} + request_init["firewall_resource"] = compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "firewall": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + firewall='firewall_value', + firewall_resource=compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/firewalls/{firewall}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateFirewallRequest(), + project='project_value', + firewall='firewall_value', + firewall_resource=compute.Firewall(allowed=[compute.Allowed(I_p_protocol='I_p_protocol_value')]), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.FirewallsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.FirewallsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FirewallsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.FirewallsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = FirewallsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.FirewallsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = FirewallsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.FirewallsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_firewalls_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.FirewallsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_firewalls_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.FirewallsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'update', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_firewalls_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FirewallsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_firewalls_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.firewalls.transports.FirewallsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.FirewallsTransport() + adc.assert_called_once() + + +def test_firewalls_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + FirewallsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_firewalls_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.FirewallsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_firewalls_host_no_port(): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_firewalls_host_with_port(): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = FirewallsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = FirewallsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FirewallsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = FirewallsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = FirewallsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = FirewallsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = FirewallsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = FirewallsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = FirewallsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = FirewallsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = FirewallsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = FirewallsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = FirewallsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = FirewallsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = FirewallsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.FirewallsTransport, '_prep_wrapped_messages') as prep: + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.FirewallsTransport, '_prep_wrapped_messages') as prep: + transport_class = FirewallsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = FirewallsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_forwarding_rules.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_forwarding_rules.py new file mode 100644 index 000000000..b58285066 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_forwarding_rules.py @@ -0,0 +1,1932 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.forwarding_rules import ForwardingRulesClient +from google.cloud.compute_v1.services.forwarding_rules import pagers +from google.cloud.compute_v1.services.forwarding_rules import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ForwardingRulesClient._get_default_mtls_endpoint(None) is None + assert ForwardingRulesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ForwardingRulesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ForwardingRulesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ForwardingRulesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ForwardingRulesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ForwardingRulesClient, +]) +def test_forwarding_rules_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ForwardingRulesRestTransport, "rest"), +]) +def test_forwarding_rules_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ForwardingRulesClient, +]) +def test_forwarding_rules_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_forwarding_rules_client_get_transport_class(): + transport = ForwardingRulesClient.get_transport_class() + available_transports = [ + transports.ForwardingRulesRestTransport, + ] + assert transport in available_transports + + transport = ForwardingRulesClient.get_transport_class("rest") + assert transport == transports.ForwardingRulesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ForwardingRulesClient, transports.ForwardingRulesRestTransport, "rest"), +]) +@mock.patch.object(ForwardingRulesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ForwardingRulesClient)) +def test_forwarding_rules_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ForwardingRulesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ForwardingRulesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ForwardingRulesClient, transports.ForwardingRulesRestTransport, "rest", "true"), + (ForwardingRulesClient, transports.ForwardingRulesRestTransport, "rest", "false"), +]) +@mock.patch.object(ForwardingRulesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ForwardingRulesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_forwarding_rules_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ForwardingRulesClient, transports.ForwardingRulesRestTransport, "rest"), +]) +def test_forwarding_rules_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ForwardingRulesClient, transports.ForwardingRulesRestTransport, "rest"), +]) +def test_forwarding_rules_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListForwardingRulesRequest): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ForwardingRuleAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ForwardingRuleAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListForwardingRulesRequest): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ForwardingRuleAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ForwardingRuleAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/forwardingRules" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListForwardingRulesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ForwardingRuleAggregatedList( + items={ + 'a':compute.ForwardingRulesScopedList(), + 'b':compute.ForwardingRulesScopedList(), + 'c':compute.ForwardingRulesScopedList(), + }, + next_page_token='abc', + ), + compute.ForwardingRuleAggregatedList( + items={}, + next_page_token='def', + ), + compute.ForwardingRuleAggregatedList( + items={ + 'g':compute.ForwardingRulesScopedList(), + }, + next_page_token='ghi', + ), + compute.ForwardingRuleAggregatedList( + items={ + 'h':compute.ForwardingRulesScopedList(), + 'i':compute.ForwardingRulesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ForwardingRuleAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.ForwardingRulesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert 
tuple(type(t) for t in result) == (str, compute.ForwardingRulesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.ForwardingRulesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteForwardingRuleRequest): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # 
Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteForwardingRuleRequest): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + forwarding_rule='forwarding_rule_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteForwardingRuleRequest(), + project='project_value', + region='region_value', + forwarding_rule='forwarding_rule_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetForwardingRuleRequest): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ForwardingRule( + I_p_address='I_p_address_value', + I_p_protocol='I_p_protocol_value', + all_ports=True, + allow_global_access=True, + backend_service='backend_service_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + ip_version='ip_version_value', + is_mirroring_collector=True, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + load_balancing_scheme='load_balancing_scheme_value', + name='name_value', + network='network_value', + network_tier='network_tier_value', + port_range='port_range_value', + ports=['ports_value'], + psc_connection_id=1793, + psc_connection_status='psc_connection_status_value', + region='region_value', + self_link='self_link_value', + service_label='service_label_value', + service_name='service_name_value', + subnetwork='subnetwork_value', + target='target_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ForwardingRule.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, compute.ForwardingRule)
+    assert response.I_p_address == 'I_p_address_value'
+    assert response.I_p_protocol == 'I_p_protocol_value'
+    assert response.all_ports is True
+    assert response.allow_global_access is True
+    assert response.backend_service == 'backend_service_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.fingerprint == 'fingerprint_value'
+    assert response.id == 205
+    assert response.ip_version == 'ip_version_value'
+    assert response.is_mirroring_collector is True
+    assert response.kind == 'kind_value'
+    assert response.label_fingerprint == 'label_fingerprint_value'
+    assert response.load_balancing_scheme == 'load_balancing_scheme_value'
+    assert response.name == 'name_value'
+    assert response.network == 'network_value'
+    assert response.network_tier == 'network_tier_value'
+    assert response.port_range == 'port_range_value'
+    assert response.ports == ['ports_value']
+    assert response.psc_connection_id == 1793
+    assert response.psc_connection_status == 'psc_connection_status_value'
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.service_label == 'service_label_value'
+    assert response.service_name == 'service_name_value'
+    assert response.subnetwork == 'subnetwork_value'
+    assert response.target == 'target_value'
+
+
+def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetForwardingRuleRequest):
+    """get() must surface an HTTP 400 from the transport as core_exceptions.BadRequest."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get(request)
+
+
+def test_get_rest_from_dict():
+    """Re-run test_get_rest with a plain dict request to cover dict-to-proto coercion."""
+    test_get_rest(request_type=dict)
+
+
+def test_get_rest_flattened(transport: str = 'rest'):
+    """Calling get() with flattened keyword args must hit the transcoded URL for the method."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.ForwardingRule()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.ForwardingRule.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            forwarding_rule='forwarding_rule_value',
+        )
+        mock_args.update(sample_request)
+        client.get(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}" % client.transport._host, args[1])
+
+
+def test_get_rest_flattened_error(transport: str = 'rest'):
+    """Passing both a request object and flattened fields to get() is a ValueError."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get(
+            compute.GetForwardingRuleRequest(),
+            project='project_value',
+            region='region_value',
+            forwarding_rule='forwarding_rule_value',
+        )
+
+
+def test_insert_rest(transport: str = 'rest', request_type=compute.InsertForwardingRuleRequest):
+    """insert() must POST the resource body and decode the Operation response."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.insert(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertForwardingRuleRequest):
+    """insert() must surface an HTTP 400 from the transport as core_exceptions.BadRequest."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.insert(request)
+
+
+def test_insert_rest_from_dict():
+    """Re-run test_insert_rest with a plain dict request to cover dict-to-proto coercion."""
+    test_insert_rest(request_type=dict)
+
+
+def test_insert_rest_flattened(transport: str = 'rest'):
+    """Calling insert() with flattened keyword args must hit the transcoded URL for the method."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'),
+        )
+        mock_args.update(sample_request)
+        client.insert(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/forwardingRules" % client.transport._host, args[1])
+
+
+def test_insert_rest_flattened_error(transport: str = 'rest'):
+    """Passing both a request object and flattened fields to insert() is a ValueError."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.insert(
+            compute.InsertForwardingRuleRequest(),
+            project='project_value',
+            region='region_value',
+            forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'),
+        )
+
+
+def test_list_rest(transport: str = 'rest', request_type=compute.ListForwardingRulesRequest):
+    """list() must return a pager wrapping the decoded ForwardingRuleList response."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.ForwardingRuleList(
+            id='id_value',
+            kind='kind_value',
+            next_page_token='next_page_token_value',
+            self_link='self_link_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.ForwardingRuleList.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.list(request)
+
+    # Establish that the response is the type that we expect.
+    # NOTE: the pager delegates unknown attributes to the wrapped response,
+    # so the field asserts below read through to the ForwardingRuleList.
+    assert isinstance(response, pagers.ListPager)
+    assert response.id == 'id_value'
+    assert response.kind == 'kind_value'
+    assert response.next_page_token == 'next_page_token_value'
+    assert response.self_link == 'self_link_value'
+
+
+def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListForwardingRulesRequest):
+    """list() must surface an HTTP 400 from the transport as core_exceptions.BadRequest."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.list(request)
+
+
+def test_list_rest_from_dict():
+    """Re-run test_list_rest with a plain dict request to cover dict-to-proto coercion."""
+    test_list_rest(request_type=dict)
+
+
+def test_list_rest_flattened(transport: str = 'rest'):
+    """Calling list() with flattened keyword args must hit the transcoded URL for the method."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.ForwardingRuleList()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.ForwardingRuleList.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+        )
+        mock_args.update(sample_request)
+        client.list(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/forwardingRules" % client.transport._host, args[1])
+
+
+def test_list_rest_flattened_error(transport: str = 'rest'):
+    """Passing both a request object and flattened fields to list() is a ValueError."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list(
+            compute.ListForwardingRulesRequest(),
+            project='project_value',
+            region='region_value',
+        )
+
+
+def test_list_rest_pager():
+    """list() pagination must walk next_page_token across pages and flatten all items."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # TODO(kbandes): remove this mock unless there's a good reason for it.
+        #with mock.patch.object(path_template, 'transcode') as transcode:
+        # Set the response as a series of pages
+        response = (
+            compute.ForwardingRuleList(
+                items=[
+                    compute.ForwardingRule(),
+                    compute.ForwardingRule(),
+                    compute.ForwardingRule(),
+                ],
+                next_page_token='abc',
+            ),
+            compute.ForwardingRuleList(
+                items=[],
+                next_page_token='def',
+            ),
+            compute.ForwardingRuleList(
+                items=[
+                    compute.ForwardingRule(),
+                ],
+                next_page_token='ghi',
+            ),
+            compute.ForwardingRuleList(
+                items=[
+                    compute.ForwardingRule(),
+                    compute.ForwardingRule(),
+                ],
+            ),
+        )
+        # Two responses for two calls
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(compute.ForwardingRuleList.to_json(x) for x in response)
+        return_values = tuple(Response() for i in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode('UTF-8')
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {"project": "sample1", "region": "sample2"}
+
+        pager = client.list(request=sample_request)
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, compute.ForwardingRule)
+                   for i in results)
+
+        pages = list(client.list(request=sample_request).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_patch_rest(transport: str = 'rest', request_type=compute.PatchForwardingRuleRequest):
+    """patch() must send the resource body and decode the Operation response."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+    request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.patch(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchForwardingRuleRequest):
+    """patch() must surface an HTTP 400 from the transport as core_exceptions.BadRequest."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+    request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.patch(request)
+
+
+def test_patch_rest_from_dict():
+    """Re-run test_patch_rest with a plain dict request to cover dict-to-proto coercion."""
+    test_patch_rest(request_type=dict)
+
+
+def test_patch_rest_flattened(transport: str = 'rest'):
+    """Calling patch() with flattened keyword args must hit the transcoded URL for the method."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            forwarding_rule='forwarding_rule_value',
+            forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'),
+        )
+        mock_args.update(sample_request)
+        client.patch(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}" % client.transport._host, args[1])
+
+
+def test_patch_rest_flattened_error(transport: str = 'rest'):
+    """Passing both a request object and flattened fields to patch() is a ValueError."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.patch(
+            compute.PatchForwardingRuleRequest(),
+            project='project_value',
+            region='region_value',
+            forwarding_rule='forwarding_rule_value',
+            forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'),
+        )
+
+
+def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsForwardingRuleRequest):
+    """set_labels() must send the labels request body and decode the Operation response."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
+    request_init["region_set_labels_request_resource"] = compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.set_labels(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsForwardingRuleRequest):
+    """set_labels() must surface an HTTP 400 from the transport as core_exceptions.BadRequest."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
+    request_init["region_set_labels_request_resource"] = compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.set_labels(request)
+
+
+def test_set_labels_rest_from_dict():
+    """Re-run test_set_labels_rest with a plain dict request to cover dict-to-proto coercion."""
+    test_set_labels_rest(request_type=dict)
+
+
+def test_set_labels_rest_flattened(transport: str = 'rest'):
+    """Calling set_labels() with flattened keyword args must hit the transcoded URL for the method."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            resource='resource_value',
+            region_set_labels_request_resource=compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value'),
+        )
+        mock_args.update(sample_request)
+        client.set_labels(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/forwardingRules/{resource}/setLabels" % client.transport._host, args[1])
+
+
+def test_set_labels_rest_flattened_error(transport: str = 'rest'):
+    """Passing both a request object and flattened fields to set_labels() is a ValueError."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_labels(
+            compute.SetLabelsForwardingRuleRequest(),
+            project='project_value',
+            region='region_value',
+            resource='resource_value',
+            region_set_labels_request_resource=compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value'),
+        )
+
+
+def test_set_target_rest(transport: str = 'rest', request_type=compute.SetTargetForwardingRuleRequest):
+    """set_target() must send the target reference body and decode the Operation response."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+    request_init["target_reference_resource"] = compute.TargetReference(target='target_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.set_target(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_set_target_rest_bad_request(transport: str = 'rest', request_type=compute.SetTargetForwardingRuleRequest):
+    """set_target() must surface an HTTP 400 from the transport as core_exceptions.BadRequest."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+    request_init["target_reference_resource"] = compute.TargetReference(target='target_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.set_target(request)
+
+
+def test_set_target_rest_from_dict():
+    """Re-run test_set_target_rest with a plain dict request to cover dict-to-proto coercion."""
+    test_set_target_rest(request_type=dict)
+
+
+def test_set_target_rest_flattened(transport: str = 'rest'):
+    """Calling set_target() with flattened keyword args must hit the transcoded URL for the method."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "forwarding_rule": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            forwarding_rule='forwarding_rule_value',
+            target_reference_resource=compute.TargetReference(target='target_value'),
+        )
+        mock_args.update(sample_request)
+        client.set_target(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/forwardingRules/{forwarding_rule}/setTarget" % client.transport._host, args[1])
+
+
+def test_set_target_rest_flattened_error(transport: str = 'rest'):
+    """Passing both a request object and flattened fields to set_target() is a ValueError."""
+    client = ForwardingRulesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_target(
+            compute.SetTargetForwardingRuleRequest(),
+            project='project_value',
+            region='region_value',
+            forwarding_rule='forwarding_rule_value',
+            target_reference_resource=compute.TargetReference(target='target_value'),
+        )
+
+
+def test_credentials_transport_error():
+    """Supplying a transport instance together with credentials/credentials_file/scopes is a ValueError."""
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.ForwardingRulesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = ForwardingRulesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.ForwardingRulesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = ForwardingRulesClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.ForwardingRulesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = ForwardingRulesClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    """A client built from an explicit transport must expose that same transport object."""
+    # A client may be instantiated with a custom transport instance.
+ transport = transports.ForwardingRulesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ForwardingRulesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ForwardingRulesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_forwarding_rules_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ForwardingRulesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_forwarding_rules_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.forwarding_rules.transports.ForwardingRulesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ForwardingRulesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'set_labels', + 'set_target', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_forwarding_rules_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.forwarding_rules.transports.ForwardingRulesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ForwardingRulesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_forwarding_rules_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.forwarding_rules.transports.ForwardingRulesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ForwardingRulesTransport() + adc.assert_called_once() + + +def test_forwarding_rules_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ForwardingRulesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_forwarding_rules_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ForwardingRulesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_forwarding_rules_host_no_port(): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_forwarding_rules_host_with_port(): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ForwardingRulesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ForwardingRulesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ForwardingRulesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ForwardingRulesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ForwardingRulesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ForwardingRulesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ForwardingRulesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ForwardingRulesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ForwardingRulesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ForwardingRulesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ForwardingRulesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ForwardingRulesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ForwardingRulesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ForwardingRulesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ForwardingRulesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ForwardingRulesTransport, '_prep_wrapped_messages') as prep: + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ForwardingRulesTransport, '_prep_wrapped_messages') as prep: + transport_class = ForwardingRulesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client 
calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_addresses.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_addresses.py new file mode 100644 index 000000000..eb31d073c --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_addresses.py @@ -0,0 +1,1251 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.global_addresses import GlobalAddressesClient +from google.cloud.compute_v1.services.global_addresses import pagers +from google.cloud.compute_v1.services.global_addresses import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GlobalAddressesClient._get_default_mtls_endpoint(None) is None + assert GlobalAddressesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert GlobalAddressesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert GlobalAddressesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert GlobalAddressesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert GlobalAddressesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + GlobalAddressesClient, +]) +def test_global_addresses_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.GlobalAddressesRestTransport, "rest"), +]) +def test_global_addresses_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + GlobalAddressesClient, +]) +def test_global_addresses_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_addresses_client_get_transport_class(): + transport = GlobalAddressesClient.get_transport_class() + available_transports = [ + transports.GlobalAddressesRestTransport, + ] + assert transport in available_transports + + transport = GlobalAddressesClient.get_transport_class("rest") + assert transport == transports.GlobalAddressesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalAddressesClient, transports.GlobalAddressesRestTransport, "rest"), +]) +@mock.patch.object(GlobalAddressesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalAddressesClient)) +def test_global_addresses_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(GlobalAddressesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GlobalAddressesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (GlobalAddressesClient, transports.GlobalAddressesRestTransport, "rest", "true"), + (GlobalAddressesClient, transports.GlobalAddressesRestTransport, "rest", "false"), +]) +@mock.patch.object(GlobalAddressesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalAddressesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_global_addresses_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalAddressesClient, transports.GlobalAddressesRestTransport, "rest"), +]) +def test_global_addresses_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalAddressesClient, transports.GlobalAddressesRestTransport, "rest"), +]) +def test_global_addresses_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteGlobalAddressRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "address": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteGlobalAddressRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "address": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "address": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + address='address_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/addresses/{address}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteGlobalAddressRequest(), + project='project_value', + address='address_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetGlobalAddressRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "address": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Address( + address='address_value', + address_type='address_type_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + ip_version='ip_version_value', + kind='kind_value', + name='name_value', + network='network_value', + network_tier='network_tier_value', + prefix_length=1391, + purpose='purpose_value', + region='region_value', + self_link='self_link_value', + status='status_value', + subnetwork='subnetwork_value', + users=['users_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Address.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Address) + assert response.address == 'address_value' + assert response.address_type == 'address_type_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.ip_version == 'ip_version_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.network_tier == 'network_tier_value' + assert response.prefix_length == 1391 + assert response.purpose == 'purpose_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.subnetwork == 'subnetwork_value' + assert response.users == ['users_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetGlobalAddressRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "address": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Address() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Address.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "address": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + address='address_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/addresses/{address}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetGlobalAddressRequest(), + project='project_value', + address='address_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertGlobalAddressRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["address_resource"] = compute.Address(address='address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertGlobalAddressRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["address_resource"] = compute.Address(address='address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + address_resource=compute.Address(address='address_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/addresses" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertGlobalAddressRequest(), + project='project_value', + address_resource=compute.Address(address='address_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListGlobalAddressesRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AddressList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AddressList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListGlobalAddressesRequest): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.AddressList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.AddressList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/addresses" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListGlobalAddressesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.AddressList( + items=[ + compute.Address(), + compute.Address(), + compute.Address(), + ], + next_page_token='abc', + ), + compute.AddressList( + items=[], + next_page_token='def', + ), + compute.AddressList( + items=[ + compute.Address(), + ], + next_page_token='ghi', + ), + compute.AddressList( + items=[ + compute.Address(), + compute.Address(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.AddressList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Address) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.GlobalAddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GlobalAddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalAddressesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GlobalAddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalAddressesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GlobalAddressesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GlobalAddressesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.GlobalAddressesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_global_addresses_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GlobalAddressesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_global_addresses_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.global_addresses.transports.GlobalAddressesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.GlobalAddressesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_global_addresses_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.global_addresses.transports.GlobalAddressesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalAddressesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + 
quota_project_id="octopus", + ) + + +def test_global_addresses_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.global_addresses.transports.GlobalAddressesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalAddressesTransport() + adc.assert_called_once() + + +def test_global_addresses_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GlobalAddressesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_global_addresses_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.GlobalAddressesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_global_addresses_host_no_port(): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_addresses_host_with_port(): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + 
client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = GlobalAddressesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = GlobalAddressesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalAddressesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = GlobalAddressesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = GlobalAddressesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalAddressesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = GlobalAddressesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = GlobalAddressesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalAddressesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = GlobalAddressesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = GlobalAddressesClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalAddressesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = GlobalAddressesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = GlobalAddressesClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalAddressesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.GlobalAddressesTransport, '_prep_wrapped_messages') as prep: + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.GlobalAddressesTransport, '_prep_wrapped_messages') as prep: + transport_class = GlobalAddressesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = GlobalAddressesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_forwarding_rules.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_forwarding_rules.py new file mode 100644 index 000000000..b3db6401e --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_forwarding_rules.py @@ -0,0 +1,1733 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.global_forwarding_rules import GlobalForwardingRulesClient +from google.cloud.compute_v1.services.global_forwarding_rules import pagers +from google.cloud.compute_v1.services.global_forwarding_rules import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GlobalForwardingRulesClient._get_default_mtls_endpoint(None) is None + assert GlobalForwardingRulesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert GlobalForwardingRulesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert GlobalForwardingRulesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert GlobalForwardingRulesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert GlobalForwardingRulesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + GlobalForwardingRulesClient, +]) +def test_global_forwarding_rules_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.GlobalForwardingRulesRestTransport, "rest"), +]) +def test_global_forwarding_rules_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, 
None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + GlobalForwardingRulesClient, +]) +def test_global_forwarding_rules_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_forwarding_rules_client_get_transport_class(): + transport = GlobalForwardingRulesClient.get_transport_class() + available_transports = [ + transports.GlobalForwardingRulesRestTransport, + ] + assert transport in available_transports + + transport = GlobalForwardingRulesClient.get_transport_class("rest") + assert transport == transports.GlobalForwardingRulesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalForwardingRulesClient, transports.GlobalForwardingRulesRestTransport, "rest"), +]) +@mock.patch.object(GlobalForwardingRulesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalForwardingRulesClient)) +def test_global_forwarding_rules_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(GlobalForwardingRulesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GlobalForwardingRulesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (GlobalForwardingRulesClient, transports.GlobalForwardingRulesRestTransport, "rest", "true"), + (GlobalForwardingRulesClient, transports.GlobalForwardingRulesRestTransport, "rest", "false"), +]) +@mock.patch.object(GlobalForwardingRulesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalForwardingRulesClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_global_forwarding_rules_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalForwardingRulesClient, transports.GlobalForwardingRulesRestTransport, "rest"), +]) +def test_global_forwarding_rules_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalForwardingRulesClient, transports.GlobalForwardingRulesRestTransport, "rest"), +]) +def test_global_forwarding_rules_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "forwarding_rule": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + forwarding_rule='forwarding_rule_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteGlobalForwardingRuleRequest(), + project='project_value', + forwarding_rule='forwarding_rule_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ForwardingRule( + I_p_address='I_p_address_value', + I_p_protocol='I_p_protocol_value', + all_ports=True, + allow_global_access=True, + backend_service='backend_service_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + ip_version='ip_version_value', + is_mirroring_collector=True, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + load_balancing_scheme='load_balancing_scheme_value', + name='name_value', + network='network_value', + network_tier='network_tier_value', + port_range='port_range_value', + ports=['ports_value'], + psc_connection_id=1793, + psc_connection_status='psc_connection_status_value', + region='region_value', + self_link='self_link_value', + service_label='service_label_value', + service_name='service_name_value', + subnetwork='subnetwork_value', + target='target_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ForwardingRule.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.ForwardingRule) + assert response.I_p_address == 'I_p_address_value' + assert response.I_p_protocol == 'I_p_protocol_value' + assert response.all_ports is True + assert response.allow_global_access is True + assert response.backend_service == 'backend_service_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.ip_version == 'ip_version_value' + assert response.is_mirroring_collector is True + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.load_balancing_scheme == 'load_balancing_scheme_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.network_tier == 'network_tier_value' + assert response.port_range == 'port_range_value' + assert response.ports == ['ports_value'] + assert response.psc_connection_id == 1793 + assert response.psc_connection_status == 'psc_connection_status_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.service_label == 'service_label_value' + assert response.service_name == 'service_name_value' + assert response.subnetwork == 'subnetwork_value' + assert response.target == 'target_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ForwardingRule() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ForwardingRule.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "forwarding_rule": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + forwarding_rule='forwarding_rule_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetGlobalForwardingRuleRequest(), + project='project_value', + forwarding_rule='forwarding_rule_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/forwardingRules" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertGlobalForwardingRuleRequest(), + project='project_value', + forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListGlobalForwardingRulesRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ForwardingRuleList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ForwardingRuleList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListGlobalForwardingRulesRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ForwardingRuleList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ForwardingRuleList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/forwardingRules" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListGlobalForwardingRulesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ForwardingRuleList( + items=[ + compute.ForwardingRule(), + compute.ForwardingRule(), + compute.ForwardingRule(), + ], + next_page_token='abc', + ), + compute.ForwardingRuleList( + items=[], + next_page_token='def', + ), + compute.ForwardingRuleList( + items=[ + compute.ForwardingRule(), + ], + next_page_token='ghi', + ), + compute.ForwardingRuleList( + items=[ + compute.ForwardingRule(), + compute.ForwardingRule(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ForwardingRuleList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.ForwardingRule) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request_init["forwarding_rule_resource"] = compute.ForwardingRule(I_p_address='I_p_address_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "forwarding_rule": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + forwarding_rule='forwarding_rule_value', + forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchGlobalForwardingRuleRequest(), + project='project_value', + forwarding_rule='forwarding_rule_value', + forwarding_rule_resource=compute.ForwardingRule(I_p_address='I_p_address_value'), + ) + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_labels(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/forwardingRules/{resource}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsGlobalForwardingRuleRequest(), + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_set_target_rest(transport: str = 'rest', request_type=compute.SetTargetGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request_init["target_reference_resource"] = compute.TargetReference(target='target_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_target(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_target_rest_bad_request(transport: str = 'rest', request_type=compute.SetTargetGlobalForwardingRuleRequest): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "forwarding_rule": "sample2"} + request_init["target_reference_resource"] = compute.TargetReference(target='target_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_target(request) + + +def test_set_target_rest_from_dict(): + test_set_target_rest(request_type=dict) + + +def test_set_target_rest_flattened(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "forwarding_rule": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + forwarding_rule='forwarding_rule_value', + target_reference_resource=compute.TargetReference(target='target_value'), + ) + mock_args.update(sample_request) + client.set_target(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/forwardingRules/{forwarding_rule}/setTarget" % client.transport._host, args[1]) + + +def test_set_target_rest_flattened_error(transport: str = 'rest'): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_target( + compute.SetTargetGlobalForwardingRuleRequest(), + project='project_value', + forwarding_rule='forwarding_rule_value', + target_reference_resource=compute.TargetReference(target='target_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GlobalForwardingRulesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GlobalForwardingRulesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalForwardingRulesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GlobalForwardingRulesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalForwardingRulesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.GlobalForwardingRulesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GlobalForwardingRulesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.GlobalForwardingRulesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_global_forwarding_rules_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GlobalForwardingRulesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_global_forwarding_rules_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.global_forwarding_rules.transports.GlobalForwardingRulesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.GlobalForwardingRulesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'set_labels', + 'set_target', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_global_forwarding_rules_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.global_forwarding_rules.transports.GlobalForwardingRulesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalForwardingRulesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_global_forwarding_rules_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.global_forwarding_rules.transports.GlobalForwardingRulesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalForwardingRulesTransport() + adc.assert_called_once() + + +def test_global_forwarding_rules_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GlobalForwardingRulesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_global_forwarding_rules_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.GlobalForwardingRulesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_global_forwarding_rules_host_no_port(): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_forwarding_rules_host_with_port(): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = GlobalForwardingRulesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = GlobalForwardingRulesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalForwardingRulesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = GlobalForwardingRulesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = GlobalForwardingRulesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalForwardingRulesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = GlobalForwardingRulesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = GlobalForwardingRulesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalForwardingRulesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = GlobalForwardingRulesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = GlobalForwardingRulesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalForwardingRulesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = GlobalForwardingRulesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = GlobalForwardingRulesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalForwardingRulesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.GlobalForwardingRulesTransport, '_prep_wrapped_messages') as prep: + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.GlobalForwardingRulesTransport, '_prep_wrapped_messages') as prep: + transport_class = GlobalForwardingRulesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = GlobalForwardingRulesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = GlobalForwardingRulesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_network_endpoint_groups.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_network_endpoint_groups.py new file mode 100644 index 000000000..2cc431656 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_network_endpoint_groups.py @@ -0,0 +1,1724 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import os
import mock

import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule

from requests import Response
from requests import Request
from requests.sessions import Session

from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.global_network_endpoint_groups import GlobalNetworkEndpointGroupsClient
from google.cloud.compute_v1.services.global_network_endpoint_groups import pagers
from google.cloud.compute_v1.services.global_network_endpoint_groups import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth


def client_cert_source_callback():
    """Stand-in client-certificate provider: returns a (cert, key) byte pair."""
    return b"cert bytes", b"key bytes"


# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GlobalNetworkEndpointGroupsClient._get_default_mtls_endpoint(None) is None + assert GlobalNetworkEndpointGroupsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert GlobalNetworkEndpointGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert GlobalNetworkEndpointGroupsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert GlobalNetworkEndpointGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert GlobalNetworkEndpointGroupsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + GlobalNetworkEndpointGroupsClient, +]) +def test_global_network_endpoint_groups_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.GlobalNetworkEndpointGroupsRestTransport, "rest"), +]) +def test_global_network_endpoint_groups_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + GlobalNetworkEndpointGroupsClient, +]) +def test_global_network_endpoint_groups_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_network_endpoint_groups_client_get_transport_class(): + transport = GlobalNetworkEndpointGroupsClient.get_transport_class() + available_transports = [ + transports.GlobalNetworkEndpointGroupsRestTransport, + ] + assert transport in available_transports + + transport = GlobalNetworkEndpointGroupsClient.get_transport_class("rest") + assert transport == transports.GlobalNetworkEndpointGroupsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalNetworkEndpointGroupsClient, transports.GlobalNetworkEndpointGroupsRestTransport, "rest"), +]) +@mock.patch.object(GlobalNetworkEndpointGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalNetworkEndpointGroupsClient)) +def 
test_global_network_endpoint_groups_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(GlobalNetworkEndpointGroupsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GlobalNetworkEndpointGroupsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (GlobalNetworkEndpointGroupsClient, transports.GlobalNetworkEndpointGroupsRestTransport, "rest", "true"), + (GlobalNetworkEndpointGroupsClient, transports.GlobalNetworkEndpointGroupsRestTransport, "rest", "false"), +]) +@mock.patch.object(GlobalNetworkEndpointGroupsClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(GlobalNetworkEndpointGroupsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_global_network_endpoint_groups_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalNetworkEndpointGroupsClient, transports.GlobalNetworkEndpointGroupsRestTransport, "rest"), +]) +def test_global_network_endpoint_groups_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalNetworkEndpointGroupsClient, transports.GlobalNetworkEndpointGroupsRestTransport, "rest"), +]) +def test_global_network_endpoint_groups_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_attach_network_endpoints_rest(transport: str = 'rest', request_type=compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request_init["global_network_endpoint_groups_attach_endpoints_request_resource"] = compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.attach_network_endpoints(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_attach_network_endpoints_rest_bad_request(transport: str = 'rest', request_type=compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request_init["global_network_endpoint_groups_attach_endpoints_request_resource"] = compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.attach_network_endpoints(request) + + +def test_attach_network_endpoints_rest_from_dict(): + test_attach_network_endpoints_rest(request_type=dict) + + +def test_attach_network_endpoints_rest_flattened(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network_endpoint_group": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network_endpoint_group='network_endpoint_group_value', + global_network_endpoint_groups_attach_endpoints_request_resource=compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + mock_args.update(sample_request) + client.attach_network_endpoints(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}/attachNetworkEndpoints" % client.transport._host, args[1]) + + +def test_attach_network_endpoints_rest_flattened_error(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.attach_network_endpoints( + compute.AttachNetworkEndpointsGlobalNetworkEndpointGroupRequest(), + project='project_value', + network_endpoint_group='network_endpoint_group_value', + global_network_endpoint_groups_attach_endpoints_request_resource=compute.GlobalNetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network_endpoint_group": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network_endpoint_group='network_endpoint_group_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteGlobalNetworkEndpointGroupRequest(), + project='project_value', + network_endpoint_group='network_endpoint_group_value', + ) + + +def test_detach_network_endpoints_rest(transport: str = 'rest', request_type=compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request_init["global_network_endpoint_groups_detach_endpoints_request_resource"] = compute.GlobalNetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.detach_network_endpoints(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_detach_network_endpoints_rest_bad_request(transport: str = 'rest', request_type=compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request_init["global_network_endpoint_groups_detach_endpoints_request_resource"] = compute.GlobalNetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.detach_network_endpoints(request) + + +def test_detach_network_endpoints_rest_from_dict(): + test_detach_network_endpoints_rest(request_type=dict) + + +def test_detach_network_endpoints_rest_flattened(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network_endpoint_group": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network_endpoint_group='network_endpoint_group_value', + global_network_endpoint_groups_detach_endpoints_request_resource=compute.GlobalNetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + mock_args.update(sample_request) + client.detach_network_endpoints(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}/detachNetworkEndpoints" % client.transport._host, args[1]) + + +def test_detach_network_endpoints_rest_flattened_error(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.detach_network_endpoints( + compute.DetachNetworkEndpointsGlobalNetworkEndpointGroupRequest(), + project='project_value', + network_endpoint_group='network_endpoint_group_value', + global_network_endpoint_groups_detach_endpoints_request_resource=compute.GlobalNetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworkEndpointGroup( + creation_timestamp='creation_timestamp_value', + default_port=1289, + description='description_value', + id=205, + kind='kind_value', + name='name_value', + network='network_value', + network_endpoint_type='network_endpoint_type_value', + region='region_value', + self_link='self_link_value', + size=443, + subnetwork='subnetwork_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroup.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.NetworkEndpointGroup) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.default_port == 1289 + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.network_endpoint_type == 'network_endpoint_type_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.size == 443 + assert response.subnetwork == 'subnetwork_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroup() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroup.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network_endpoint_group": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network_endpoint_group='network_endpoint_group_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetGlobalNetworkEndpointGroupRequest(), + project='project_value', + network_endpoint_group='network_endpoint_group_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["network_endpoint_group_resource"] = compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertGlobalNetworkEndpointGroupRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["network_endpoint_group_resource"] = compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network_endpoint_group_resource=compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networkEndpointGroups" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertGlobalNetworkEndpointGroupRequest(), + project='project_value', + network_endpoint_group_resource=compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListGlobalNetworkEndpointGroupsRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListGlobalNetworkEndpointGroupsRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworkEndpointGroupList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networkEndpointGroups" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListGlobalNetworkEndpointGroupsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + ], + next_page_token='abc', + ), + compute.NetworkEndpointGroupList( + items=[], + next_page_token='def', + ), + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + ], + next_page_token='ghi', + ), + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NetworkEndpointGroupList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NetworkEndpointGroup) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_network_endpoints_rest(transport: str = 'rest', request_type=compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupsListNetworkEndpoints( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupsListNetworkEndpoints.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_network_endpoints(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNetworkEndpointsPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + + +def test_list_network_endpoints_rest_bad_request(transport: str = 'rest', request_type=compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network_endpoint_group": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_network_endpoints(request) + + +def test_list_network_endpoints_rest_from_dict(): + test_list_network_endpoints_rest(request_type=dict) + + +def test_list_network_endpoints_rest_flattened(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupsListNetworkEndpoints() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupsListNetworkEndpoints.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network_endpoint_group": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network_endpoint_group='network_endpoint_group_value', + ) + mock_args.update(sample_request) + client.list_network_endpoints(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networkEndpointGroups/{network_endpoint_group}/listNetworkEndpoints" % client.transport._host, args[1]) + + +def test_list_network_endpoints_rest_flattened_error(transport: str = 'rest'): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_network_endpoints( + compute.ListNetworkEndpointsGlobalNetworkEndpointGroupsRequest(), + project='project_value', + network_endpoint_group='network_endpoint_group_value', + ) + + +def test_list_network_endpoints_rest_pager(): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[ + compute.NetworkEndpointWithHealthStatus(), + compute.NetworkEndpointWithHealthStatus(), + compute.NetworkEndpointWithHealthStatus(), + ], + next_page_token='abc', + ), + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[], + next_page_token='def', + ), + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[ + compute.NetworkEndpointWithHealthStatus(), + ], + next_page_token='ghi', + ), + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[ + compute.NetworkEndpointWithHealthStatus(), + compute.NetworkEndpointWithHealthStatus(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NetworkEndpointGroupsListNetworkEndpoints.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "network_endpoint_group": "sample2"} + + pager = client.list_network_endpoints(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NetworkEndpointWithHealthStatus) + for i in results) + + pages = list(client.list_network_endpoints(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.GlobalNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GlobalNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalNetworkEndpointGroupsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GlobalNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalNetworkEndpointGroupsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GlobalNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GlobalNetworkEndpointGroupsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.GlobalNetworkEndpointGroupsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_global_network_endpoint_groups_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GlobalNetworkEndpointGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_global_network_endpoint_groups_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.global_network_endpoint_groups.transports.GlobalNetworkEndpointGroupsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.GlobalNetworkEndpointGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'attach_network_endpoints', + 'delete', + 'detach_network_endpoints', + 'get', + 'insert', + 'list', + 'list_network_endpoints', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_global_network_endpoint_groups_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.global_network_endpoint_groups.transports.GlobalNetworkEndpointGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalNetworkEndpointGroupsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_global_network_endpoint_groups_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.global_network_endpoint_groups.transports.GlobalNetworkEndpointGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalNetworkEndpointGroupsTransport() + adc.assert_called_once() + + +def test_global_network_endpoint_groups_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GlobalNetworkEndpointGroupsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_global_network_endpoint_groups_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.GlobalNetworkEndpointGroupsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_global_network_endpoint_groups_host_no_port(): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_network_endpoint_groups_host_with_port(): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = GlobalNetworkEndpointGroupsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = GlobalNetworkEndpointGroupsClient.common_billing_account_path(**expected) + + # Check that the path construction is 
reversible. + actual = GlobalNetworkEndpointGroupsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = GlobalNetworkEndpointGroupsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = GlobalNetworkEndpointGroupsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalNetworkEndpointGroupsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = GlobalNetworkEndpointGroupsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = GlobalNetworkEndpointGroupsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalNetworkEndpointGroupsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = GlobalNetworkEndpointGroupsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = GlobalNetworkEndpointGroupsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalNetworkEndpointGroupsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = GlobalNetworkEndpointGroupsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = GlobalNetworkEndpointGroupsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalNetworkEndpointGroupsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.GlobalNetworkEndpointGroupsTransport, '_prep_wrapped_messages') as prep: + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.GlobalNetworkEndpointGroupsTransport, '_prep_wrapped_messages') as prep: + transport_class = GlobalNetworkEndpointGroupsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = 
GlobalNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_operations.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_operations.py new file mode 100644 index 000000000..fa03f32bb --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_operations.py @@ -0,0 +1,1400 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.global_operations import GlobalOperationsClient +from google.cloud.compute_v1.services.global_operations import pagers +from google.cloud.compute_v1.services.global_operations import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GlobalOperationsClient._get_default_mtls_endpoint(None) is None + assert GlobalOperationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert GlobalOperationsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert GlobalOperationsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert GlobalOperationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert GlobalOperationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + GlobalOperationsClient, +]) +def test_global_operations_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.GlobalOperationsRestTransport, "rest"), +]) +def test_global_operations_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + GlobalOperationsClient, +]) +def test_global_operations_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_operations_client_get_transport_class(): + transport = GlobalOperationsClient.get_transport_class() + available_transports = [ + transports.GlobalOperationsRestTransport, + ] + assert transport in available_transports + + transport = GlobalOperationsClient.get_transport_class("rest") + assert transport == transports.GlobalOperationsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalOperationsClient, transports.GlobalOperationsRestTransport, "rest"), +]) +@mock.patch.object(GlobalOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalOperationsClient)) +def test_global_operations_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(GlobalOperationsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GlobalOperationsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (GlobalOperationsClient, transports.GlobalOperationsRestTransport, "rest", "true"), + (GlobalOperationsClient, transports.GlobalOperationsRestTransport, "rest", "false"), +]) +@mock.patch.object(GlobalOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalOperationsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_global_operations_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalOperationsClient, transports.GlobalOperationsRestTransport, "rest"), +]) +def test_global_operations_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalOperationsClient, transports.GlobalOperationsRestTransport, "rest"), +]) +def test_global_operations_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListGlobalOperationsRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.OperationAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListGlobalOperationsRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.OperationAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/operations" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListGlobalOperationsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.OperationAggregatedList( + items={ + 'a':compute.OperationsScopedList(), + 'b':compute.OperationsScopedList(), + 'c':compute.OperationsScopedList(), + }, + next_page_token='abc', + ), + compute.OperationAggregatedList( + items={}, + next_page_token='def', + ), + compute.OperationAggregatedList( + items={ + 'g':compute.OperationsScopedList(), + }, + next_page_token='ghi', + ), + compute.OperationAggregatedList( + items={ + 'h':compute.OperationsScopedList(), + 'i':compute.OperationsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.OperationAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.OperationsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.OperationsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.OperationsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteGlobalOperationRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "operation": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DeleteGlobalOperationResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteGlobalOperationResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.DeleteGlobalOperationResponse) + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteGlobalOperationRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "operation": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DeleteGlobalOperationResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteGlobalOperationResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "operation": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/operations/{operation}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteGlobalOperationRequest(), + project='project_value', + operation='operation_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetGlobalOperationRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "operation": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetGlobalOperationRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "operation": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "operation": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/operations/{operation}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetGlobalOperationRequest(), + project='project_value', + operation='operation_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListGlobalOperationsRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.OperationList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListGlobalOperationsRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.OperationList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/operations" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListGlobalOperationsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + compute.Operation(), + ], + next_page_token='abc', + ), + compute.OperationList( + items=[], + next_page_token='def', + ), + compute.OperationList( + items=[ + compute.Operation(), + ], + next_page_token='ghi', + ), + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.OperationList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Operation) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_wait_rest(transport: str = 'rest', request_type=compute.WaitGlobalOperationRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request 
that will satisfy transcoding + request_init = {"project": "sample1", "operation": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.wait(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_wait_rest_bad_request(transport: str = 'rest', request_type=compute.WaitGlobalOperationRequest): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "operation": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait(request) + + +def test_wait_rest_from_dict(): + test_wait_rest(request_type=dict) + + +def test_wait_rest_flattened(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "operation": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.wait(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/operations/{operation}/wait" % client.transport._host, args[1]) + + +def test_wait_rest_flattened_error(transport: str = 'rest'): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.wait( + compute.WaitGlobalOperationRequest(), + project='project_value', + operation='operation_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GlobalOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GlobalOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalOperationsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GlobalOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalOperationsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GlobalOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GlobalOperationsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.GlobalOperationsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_global_operations_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GlobalOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_global_operations_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.global_operations.transports.GlobalOperationsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.GlobalOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'list', + 'wait', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_global_operations_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.global_operations.transports.GlobalOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalOperationsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 
'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_global_operations_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.global_operations.transports.GlobalOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalOperationsTransport() + adc.assert_called_once() + + +def test_global_operations_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GlobalOperationsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_global_operations_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.GlobalOperationsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_global_operations_host_no_port(): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_operations_host_with_port(): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + 
client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = GlobalOperationsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = GlobalOperationsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalOperationsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = GlobalOperationsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = GlobalOperationsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalOperationsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = GlobalOperationsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = GlobalOperationsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalOperationsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = GlobalOperationsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = GlobalOperationsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalOperationsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = GlobalOperationsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = GlobalOperationsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalOperationsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.GlobalOperationsTransport, '_prep_wrapped_messages') as prep: + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.GlobalOperationsTransport, '_prep_wrapped_messages') as prep: + transport_class = GlobalOperationsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = GlobalOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_organization_operations.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_organization_operations.py new file mode 100644 index 000000000..cdb4cf5ec --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_organization_operations.py @@ -0,0 +1,1010 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.global_organization_operations import GlobalOrganizationOperationsClient +from google.cloud.compute_v1.services.global_organization_operations import pagers +from google.cloud.compute_v1.services.global_organization_operations import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GlobalOrganizationOperationsClient._get_default_mtls_endpoint(None) is None + assert GlobalOrganizationOperationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert GlobalOrganizationOperationsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert GlobalOrganizationOperationsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert GlobalOrganizationOperationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert GlobalOrganizationOperationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + GlobalOrganizationOperationsClient, +]) +def test_global_organization_operations_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.GlobalOrganizationOperationsRestTransport, "rest"), +]) +def test_global_organization_operations_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + GlobalOrganizationOperationsClient, +]) +def test_global_organization_operations_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_organization_operations_client_get_transport_class(): + transport = GlobalOrganizationOperationsClient.get_transport_class() + available_transports = [ + transports.GlobalOrganizationOperationsRestTransport, + ] + assert transport in available_transports + + transport = GlobalOrganizationOperationsClient.get_transport_class("rest") + assert transport == transports.GlobalOrganizationOperationsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalOrganizationOperationsClient, transports.GlobalOrganizationOperationsRestTransport, "rest"), +]) +@mock.patch.object(GlobalOrganizationOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(GlobalOrganizationOperationsClient)) +def 
test_global_organization_operations_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(GlobalOrganizationOperationsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GlobalOrganizationOperationsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (GlobalOrganizationOperationsClient, transports.GlobalOrganizationOperationsRestTransport, "rest", "true"), + (GlobalOrganizationOperationsClient, transports.GlobalOrganizationOperationsRestTransport, "rest", "false"), +]) +@mock.patch.object(GlobalOrganizationOperationsClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(GlobalOrganizationOperationsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_global_organization_operations_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalOrganizationOperationsClient, transports.GlobalOrganizationOperationsRestTransport, "rest"), +]) +def test_global_organization_operations_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalOrganizationOperationsClient, transports.GlobalOrganizationOperationsRestTransport, "rest"), +]) +def test_global_organization_operations_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteGlobalOrganizationOperationRequest): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"operation": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DeleteGlobalOrganizationOperationResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteGlobalOrganizationOperationResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.DeleteGlobalOrganizationOperationResponse) + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteGlobalOrganizationOperationRequest): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"operation": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DeleteGlobalOrganizationOperationResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteGlobalOrganizationOperationResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"operation": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + operation='operation_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/operations/{operation}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteGlobalOrganizationOperationRequest(), + operation='operation_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetGlobalOrganizationOperationRequest): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"operation": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetGlobalOrganizationOperationRequest): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"operation": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"operation": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + operation='operation_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/locations/global/operations/{operation}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetGlobalOrganizationOperationRequest(), + operation='operation_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListGlobalOrganizationOperationsRequest): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.OperationList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListGlobalOrganizationOperationsRequest): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_pager(): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + compute.Operation(), + ], + next_page_token='abc', + ), + compute.OperationList( + items=[], + next_page_token='def', + ), + compute.OperationList( + items=[ + compute.Operation(), + ], + next_page_token='ghi', + ), + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.OperationList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Operation) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token 
== token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GlobalOrganizationOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GlobalOrganizationOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalOrganizationOperationsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.GlobalOrganizationOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalOrganizationOperationsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GlobalOrganizationOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GlobalOrganizationOperationsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.GlobalOrganizationOperationsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_global_organization_operations_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GlobalOrganizationOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_global_organization_operations_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.global_organization_operations.transports.GlobalOrganizationOperationsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.GlobalOrganizationOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_global_organization_operations_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.global_organization_operations.transports.GlobalOrganizationOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalOrganizationOperationsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_global_organization_operations_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.global_organization_operations.transports.GlobalOrganizationOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalOrganizationOperationsTransport() + adc.assert_called_once() + + +def test_global_organization_operations_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GlobalOrganizationOperationsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_global_organization_operations_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.GlobalOrganizationOperationsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_global_organization_operations_host_no_port(): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_organization_operations_host_with_port(): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = GlobalOrganizationOperationsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = GlobalOrganizationOperationsClient.common_billing_account_path(**expected) + + # Check that the path 
construction is reversible. + actual = GlobalOrganizationOperationsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = GlobalOrganizationOperationsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = GlobalOrganizationOperationsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalOrganizationOperationsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = GlobalOrganizationOperationsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = GlobalOrganizationOperationsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalOrganizationOperationsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = GlobalOrganizationOperationsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = GlobalOrganizationOperationsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalOrganizationOperationsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = GlobalOrganizationOperationsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = GlobalOrganizationOperationsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalOrganizationOperationsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.GlobalOrganizationOperationsTransport, '_prep_wrapped_messages') as prep: + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.GlobalOrganizationOperationsTransport, '_prep_wrapped_messages') as prep: + transport_class = GlobalOrganizationOperationsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = 
GlobalOrganizationOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_public_delegated_prefixes.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_public_delegated_prefixes.py new file mode 100644 index 000000000..83395b215 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_global_public_delegated_prefixes.py @@ -0,0 +1,1395 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.global_public_delegated_prefixes import GlobalPublicDelegatedPrefixesClient +from google.cloud.compute_v1.services.global_public_delegated_prefixes import pagers +from google.cloud.compute_v1.services.global_public_delegated_prefixes import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(None) is None + assert GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert GlobalPublicDelegatedPrefixesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + GlobalPublicDelegatedPrefixesClient, +]) +def test_global_public_delegated_prefixes_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.GlobalPublicDelegatedPrefixesRestTransport, "rest"), +]) +def test_global_public_delegated_prefixes_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 
'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + GlobalPublicDelegatedPrefixesClient, +]) +def test_global_public_delegated_prefixes_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_public_delegated_prefixes_client_get_transport_class(): + transport = GlobalPublicDelegatedPrefixesClient.get_transport_class() + available_transports = [ + transports.GlobalPublicDelegatedPrefixesRestTransport, + ] + assert transport in available_transports + + transport = GlobalPublicDelegatedPrefixesClient.get_transport_class("rest") + assert transport == transports.GlobalPublicDelegatedPrefixesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalPublicDelegatedPrefixesClient, transports.GlobalPublicDelegatedPrefixesRestTransport, "rest"), +]) +@mock.patch.object(GlobalPublicDelegatedPrefixesClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(GlobalPublicDelegatedPrefixesClient)) +def test_global_public_delegated_prefixes_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(GlobalPublicDelegatedPrefixesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(GlobalPublicDelegatedPrefixesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (GlobalPublicDelegatedPrefixesClient, transports.GlobalPublicDelegatedPrefixesRestTransport, "rest", "true"), + (GlobalPublicDelegatedPrefixesClient, transports.GlobalPublicDelegatedPrefixesRestTransport, "rest", "false"), +]) +@mock.patch.object(GlobalPublicDelegatedPrefixesClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(GlobalPublicDelegatedPrefixesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_global_public_delegated_prefixes_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalPublicDelegatedPrefixesClient, transports.GlobalPublicDelegatedPrefixesRestTransport, "rest"), +]) +def test_global_public_delegated_prefixes_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (GlobalPublicDelegatedPrefixesClient, transports.GlobalPublicDelegatedPrefixesRestTransport, "rest"), +]) +def test_global_public_delegated_prefixes_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_delegated_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_delegated_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "public_delegated_prefix": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicDelegatedPrefixes/{public_delegated_prefix}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteGlobalPublicDelegatedPrefixeRequest(), + project='project_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_delegated_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PublicDelegatedPrefix( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + ip_cidr_range='ip_cidr_range_value', + is_live_migration=True, + kind='kind_value', + name='name_value', + parent_prefix='parent_prefix_value', + region='region_value', + self_link='self_link_value', + status='status_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefix.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.PublicDelegatedPrefix) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.ip_cidr_range == 'ip_cidr_range_value' + assert response.is_live_migration is True + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.parent_prefix == 'parent_prefix_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_delegated_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PublicDelegatedPrefix() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefix.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "public_delegated_prefix": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicDelegatedPrefixes/{public_delegated_prefix}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetGlobalPublicDelegatedPrefixeRequest(), + project='project_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicDelegatedPrefixes" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertGlobalPublicDelegatedPrefixeRequest(), + project='project_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListGlobalPublicDelegatedPrefixesRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PublicDelegatedPrefixList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefixList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListGlobalPublicDelegatedPrefixesRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PublicDelegatedPrefixList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefixList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicDelegatedPrefixes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListGlobalPublicDelegatedPrefixesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.PublicDelegatedPrefixList( + items=[ + compute.PublicDelegatedPrefix(), + compute.PublicDelegatedPrefix(), + compute.PublicDelegatedPrefix(), + ], + next_page_token='abc', + ), + compute.PublicDelegatedPrefixList( + items=[], + next_page_token='def', + ), + compute.PublicDelegatedPrefixList( + items=[ + compute.PublicDelegatedPrefix(), + ], + next_page_token='ghi', + ), + compute.PublicDelegatedPrefixList( + items=[ + compute.PublicDelegatedPrefix(), + compute.PublicDelegatedPrefix(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.PublicDelegatedPrefixList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.PublicDelegatedPrefix) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_delegated_prefix": "sample2"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + request = 
request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchGlobalPublicDelegatedPrefixeRequest): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_delegated_prefix": "sample2"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "public_delegated_prefix": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_delegated_prefix='public_delegated_prefix_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicDelegatedPrefixes/{public_delegated_prefix}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchGlobalPublicDelegatedPrefixeRequest(), + project='project_value', + public_delegated_prefix='public_delegated_prefix_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.GlobalPublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.GlobalPublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalPublicDelegatedPrefixesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.GlobalPublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = GlobalPublicDelegatedPrefixesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.GlobalPublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = GlobalPublicDelegatedPrefixesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.GlobalPublicDelegatedPrefixesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_global_public_delegated_prefixes_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.GlobalPublicDelegatedPrefixesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_global_public_delegated_prefixes_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.global_public_delegated_prefixes.transports.GlobalPublicDelegatedPrefixesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.GlobalPublicDelegatedPrefixesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_global_public_delegated_prefixes_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.global_public_delegated_prefixes.transports.GlobalPublicDelegatedPrefixesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalPublicDelegatedPrefixesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_global_public_delegated_prefixes_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.global_public_delegated_prefixes.transports.GlobalPublicDelegatedPrefixesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.GlobalPublicDelegatedPrefixesTransport() + adc.assert_called_once() + + +def test_global_public_delegated_prefixes_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + GlobalPublicDelegatedPrefixesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_global_public_delegated_prefixes_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.GlobalPublicDelegatedPrefixesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_global_public_delegated_prefixes_host_no_port(): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_global_public_delegated_prefixes_host_with_port(): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = GlobalPublicDelegatedPrefixesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = GlobalPublicDelegatedPrefixesClient.common_billing_account_path(**expected) + + # Check that the path 
construction is reversible. + actual = GlobalPublicDelegatedPrefixesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = GlobalPublicDelegatedPrefixesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = GlobalPublicDelegatedPrefixesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalPublicDelegatedPrefixesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = GlobalPublicDelegatedPrefixesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = GlobalPublicDelegatedPrefixesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalPublicDelegatedPrefixesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = GlobalPublicDelegatedPrefixesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = GlobalPublicDelegatedPrefixesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = GlobalPublicDelegatedPrefixesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = GlobalPublicDelegatedPrefixesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = GlobalPublicDelegatedPrefixesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = GlobalPublicDelegatedPrefixesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.GlobalPublicDelegatedPrefixesTransport, '_prep_wrapped_messages') as prep: + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.GlobalPublicDelegatedPrefixesTransport, '_prep_wrapped_messages') as prep: + transport_class = GlobalPublicDelegatedPrefixesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + 
client = GlobalPublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_health_checks.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_health_checks.py new file mode 100644 index 000000000..7498a7ad8 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_health_checks.py @@ -0,0 +1,1734 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.health_checks import HealthChecksClient +from google.cloud.compute_v1.services.health_checks import pagers +from google.cloud.compute_v1.services.health_checks import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert HealthChecksClient._get_default_mtls_endpoint(None) is None + assert HealthChecksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert HealthChecksClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert HealthChecksClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert HealthChecksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert HealthChecksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + HealthChecksClient, +]) +def test_health_checks_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.HealthChecksRestTransport, "rest"), +]) +def test_health_checks_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + HealthChecksClient, +]) +def test_health_checks_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_health_checks_client_get_transport_class(): + transport = HealthChecksClient.get_transport_class() + available_transports = [ + transports.HealthChecksRestTransport, + ] + assert transport in available_transports + + transport = HealthChecksClient.get_transport_class("rest") + assert transport == transports.HealthChecksRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (HealthChecksClient, transports.HealthChecksRestTransport, "rest"), +]) +@mock.patch.object(HealthChecksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(HealthChecksClient)) +def test_health_checks_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(HealthChecksClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(HealthChecksClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (HealthChecksClient, transports.HealthChecksRestTransport, "rest", "true"), + (HealthChecksClient, transports.HealthChecksRestTransport, "rest", "false"), +]) +@mock.patch.object(HealthChecksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(HealthChecksClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) 
+def test_health_checks_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (HealthChecksClient, transports.HealthChecksRestTransport, "rest"), +]) +def test_health_checks_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (HealthChecksClient, transports.HealthChecksRestTransport, "rest"), +]) +def test_health_checks_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListHealthChecksRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.HealthChecksAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthChecksAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListHealthChecksRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.HealthChecksAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthChecksAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/healthChecks" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListHealthChecksRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.HealthChecksAggregatedList( + items={ + 'a':compute.HealthChecksScopedList(), + 'b':compute.HealthChecksScopedList(), + 'c':compute.HealthChecksScopedList(), + }, + next_page_token='abc', + ), + compute.HealthChecksAggregatedList( + items={}, + next_page_token='def', + ), + compute.HealthChecksAggregatedList( + items={ + 'g':compute.HealthChecksScopedList(), + }, + next_page_token='ghi', + ), + compute.HealthChecksAggregatedList( + items={ + 'h':compute.HealthChecksScopedList(), + 'i':compute.HealthChecksScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.HealthChecksAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.HealthChecksScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.HealthChecksScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.HealthChecksScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "health_check": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + health_check='health_check_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteHealthCheckRequest(), + project='project_value', + health_check='health_check_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.HealthCheck( + check_interval_sec=1884, + creation_timestamp='creation_timestamp_value', + description='description_value', + healthy_threshold=1819, + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + timeout_sec=1185, + type_='type__value', + unhealthy_threshold=2046, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheck.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.HealthCheck) + assert response.check_interval_sec == 1884 + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.healthy_threshold == 1819 + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.timeout_sec == 1185 + assert response.type_ == 'type__value' + assert response.unhealthy_threshold == 2046 + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.HealthCheck() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheck.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "health_check": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + health_check='health_check_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetHealthCheckRequest(), + project='project_value', + health_check='health_check_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/healthChecks" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertHealthCheckRequest(), + project='project_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListHealthChecksRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.HealthCheckList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListHealthChecksRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.HealthCheckList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/healthChecks" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListHealthChecksRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.HealthCheckList( + items=[ + compute.HealthCheck(), + compute.HealthCheck(), + compute.HealthCheck(), + ], + next_page_token='abc', + ), + compute.HealthCheckList( + items=[], + next_page_token='def', + ), + compute.HealthCheckList( + items=[ + compute.HealthCheck(), + ], + next_page_token='ghi', + ), + compute.HealthCheckList( + items=[ + compute.HealthCheck(), + compute.HealthCheck(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.HealthCheckList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.HealthCheck) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a 
request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "health_check": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchHealthCheckRequest(), + project='project_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateHealthCheckRequest): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "health_check": "sample2"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "health_check": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateHealthCheckRequest(), + project='project_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.HealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.HealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = HealthChecksClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.HealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = HealthChecksClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.HealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = HealthChecksClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.HealthChecksRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_health_checks_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.HealthChecksTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_health_checks_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.health_checks.transports.HealthChecksTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.HealthChecksTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'update', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_health_checks_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.health_checks.transports.HealthChecksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.HealthChecksTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_health_checks_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.health_checks.transports.HealthChecksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.HealthChecksTransport() + adc.assert_called_once() + + +def test_health_checks_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + HealthChecksClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_health_checks_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.HealthChecksRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_health_checks_host_no_port(): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_health_checks_host_with_port(): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = HealthChecksClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = HealthChecksClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = HealthChecksClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = HealthChecksClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = HealthChecksClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = HealthChecksClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = HealthChecksClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = HealthChecksClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = HealthChecksClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = HealthChecksClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = HealthChecksClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = HealthChecksClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = HealthChecksClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = HealthChecksClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = HealthChecksClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.HealthChecksTransport, '_prep_wrapped_messages') as prep: + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.HealthChecksTransport, '_prep_wrapped_messages') as prep: + transport_class = HealthChecksClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = HealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_image_family_views.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_image_family_views.py new file mode 100644 index 000000000..4a504d141 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_image_family_views.py @@ -0,0 +1,745 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.image_family_views import ImageFamilyViewsClient +from google.cloud.compute_v1.services.image_family_views import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ImageFamilyViewsClient._get_default_mtls_endpoint(None) is None + assert ImageFamilyViewsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ImageFamilyViewsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ImageFamilyViewsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ImageFamilyViewsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ImageFamilyViewsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ImageFamilyViewsClient, +]) +def test_image_family_views_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ImageFamilyViewsRestTransport, "rest"), +]) +def test_image_family_views_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ImageFamilyViewsClient, +]) +def test_image_family_views_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_image_family_views_client_get_transport_class(): + transport = ImageFamilyViewsClient.get_transport_class() + available_transports = [ + transports.ImageFamilyViewsRestTransport, + ] + assert transport in available_transports + + transport = ImageFamilyViewsClient.get_transport_class("rest") + assert transport == transports.ImageFamilyViewsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ImageFamilyViewsClient, transports.ImageFamilyViewsRestTransport, "rest"), +]) +@mock.patch.object(ImageFamilyViewsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ImageFamilyViewsClient)) +def test_image_family_views_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ImageFamilyViewsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ImageFamilyViewsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ImageFamilyViewsClient, transports.ImageFamilyViewsRestTransport, "rest", "true"), + (ImageFamilyViewsClient, transports.ImageFamilyViewsRestTransport, "rest", "false"), +]) +@mock.patch.object(ImageFamilyViewsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ImageFamilyViewsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_image_family_views_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ImageFamilyViewsClient, transports.ImageFamilyViewsRestTransport, "rest"), +]) +def test_image_family_views_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ImageFamilyViewsClient, transports.ImageFamilyViewsRestTransport, "rest"), +]) +def test_image_family_views_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetImageFamilyViewRequest): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "family": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ImageFamilyView( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ImageFamilyView.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.ImageFamilyView) + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetImageFamilyViewRequest): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "family": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ImageFamilyView() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ImageFamilyView.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "family": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + family='family_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/imageFamilyViews/{family}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetImageFamilyViewRequest(), + project='project_value', + zone='zone_value', + family='family_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ImageFamilyViewsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.ImageFamilyViewsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ImageFamilyViewsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ImageFamilyViewsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ImageFamilyViewsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ImageFamilyViewsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ImageFamilyViewsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ImageFamilyViewsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_image_family_views_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ImageFamilyViewsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_image_family_views_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.image_family_views.transports.ImageFamilyViewsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ImageFamilyViewsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'get', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_image_family_views_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.image_family_views.transports.ImageFamilyViewsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ImageFamilyViewsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_image_family_views_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.image_family_views.transports.ImageFamilyViewsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ImageFamilyViewsTransport() + adc.assert_called_once() + + +def test_image_family_views_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ImageFamilyViewsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_image_family_views_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ImageFamilyViewsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_image_family_views_host_no_port(): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_image_family_views_host_with_port(): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def 
test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ImageFamilyViewsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ImageFamilyViewsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ImageFamilyViewsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ImageFamilyViewsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ImageFamilyViewsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ImageFamilyViewsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ImageFamilyViewsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ImageFamilyViewsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ImageFamilyViewsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ImageFamilyViewsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ImageFamilyViewsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ImageFamilyViewsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ImageFamilyViewsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ImageFamilyViewsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ImageFamilyViewsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ImageFamilyViewsTransport, '_prep_wrapped_messages') as prep: + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ImageFamilyViewsTransport, '_prep_wrapped_messages') as prep: + transport_class = ImageFamilyViewsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ImageFamilyViewsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_images.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_images.py new file mode 100644 index 000000000..1f6da98a6 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_images.py @@ -0,0 +1,2213 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.images import ImagesClient +from google.cloud.compute_v1.services.images import pagers +from google.cloud.compute_v1.services.images import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ImagesClient._get_default_mtls_endpoint(None) is None + assert ImagesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ImagesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ImagesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ImagesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ImagesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ImagesClient, +]) +def test_images_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ImagesRestTransport, "rest"), +]) +def test_images_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ImagesClient, +]) +def test_images_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_images_client_get_transport_class(): + transport = ImagesClient.get_transport_class() + available_transports = [ + transports.ImagesRestTransport, + ] + assert transport in available_transports + + transport = ImagesClient.get_transport_class("rest") + assert transport == transports.ImagesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ImagesClient, transports.ImagesRestTransport, "rest"), +]) +@mock.patch.object(ImagesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ImagesClient)) +def test_images_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ImagesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(ImagesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ImagesClient, transports.ImagesRestTransport, "rest", "true"), + (ImagesClient, transports.ImagesRestTransport, "rest", "false"), +]) +@mock.patch.object(ImagesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ImagesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_images_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ImagesClient, transports.ImagesRestTransport, "rest"), +]) +def test_images_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ImagesClient, transports.ImagesRestTransport, "rest"), +]) +def test_images_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "image": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + image='image_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{image}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteImageRequest(), + project='project_value', + image='image_value', + ) + + +def test_deprecate_rest(transport: str = 'rest', request_type=compute.DeprecateImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": "sample2"} + request_init["deprecation_status_resource"] = compute.DeprecationStatus(deleted='deleted_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.deprecate(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_deprecate_rest_bad_request(transport: str = 'rest', request_type=compute.DeprecateImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": "sample2"} + request_init["deprecation_status_resource"] = compute.DeprecationStatus(deleted='deleted_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.deprecate(request) + + +def test_deprecate_rest_from_dict(): + test_deprecate_rest(request_type=dict) + + +def test_deprecate_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "image": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + image='image_value', + deprecation_status_resource=compute.DeprecationStatus(deleted='deleted_value'), + ) + mock_args.update(sample_request) + client.deprecate(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{image}/deprecate" % client.transport._host, args[1]) + + +def test_deprecate_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.deprecate( + compute.DeprecateImageRequest(), + project='project_value', + image='image_value', + deprecation_status_resource=compute.DeprecationStatus(deleted='deleted_value'), + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Image( + archive_size_bytes=1922, + creation_timestamp='creation_timestamp_value', + description='description_value', + disk_size_gb=1261, + family='family_value', + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + license_codes=[1360], + licenses=['licenses_value'], + name='name_value', + satisfies_pzs=True, + self_link='self_link_value', + source_disk='source_disk_value', + source_disk_id='source_disk_id_value', + source_image='source_image_value', + source_image_id='source_image_id_value', + source_snapshot='source_snapshot_value', + source_snapshot_id='source_snapshot_id_value', + source_type='source_type_value', + status='status_value', + storage_locations=['storage_locations_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Image.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Image) + assert response.archive_size_bytes == 1922 + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.disk_size_gb == 1261 + assert response.family == 'family_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.license_codes == [1360] + assert response.licenses == ['licenses_value'] + assert response.name == 'name_value' + assert response.satisfies_pzs is True + assert response.self_link == 'self_link_value' + assert response.source_disk == 'source_disk_value' + assert response.source_disk_id == 'source_disk_id_value' + assert response.source_image == 'source_image_value' + assert response.source_image_id == 'source_image_id_value' + assert response.source_snapshot == 'source_snapshot_value' + assert response.source_snapshot_id == 'source_snapshot_id_value' + assert response.source_type == 'source_type_value' + assert response.status == 'status_value' + assert response.storage_locations == ['storage_locations_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Image() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Image.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "image": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + image='image_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{image}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetImageRequest(), + project='project_value', + image='image_value', + ) + + +def test_get_from_family_rest(transport: str = 'rest', request_type=compute.GetFromFamilyImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "family": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Image( + archive_size_bytes=1922, + creation_timestamp='creation_timestamp_value', + description='description_value', + disk_size_gb=1261, + family='family_value', + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + license_codes=[1360], + licenses=['licenses_value'], + name='name_value', + satisfies_pzs=True, + self_link='self_link_value', + source_disk='source_disk_value', + source_disk_id='source_disk_id_value', + source_image='source_image_value', + source_image_id='source_image_id_value', + source_snapshot='source_snapshot_value', + source_snapshot_id='source_snapshot_id_value', + source_type='source_type_value', + status='status_value', + storage_locations=['storage_locations_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Image.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_from_family(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Image) + assert response.archive_size_bytes == 1922 + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.disk_size_gb == 1261 + assert response.family == 'family_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.license_codes == [1360] + assert response.licenses == ['licenses_value'] + assert response.name == 'name_value' + assert response.satisfies_pzs is True + assert response.self_link == 'self_link_value' + assert response.source_disk == 'source_disk_value' + assert response.source_disk_id == 'source_disk_id_value' + assert response.source_image == 'source_image_value' + assert response.source_image_id == 'source_image_id_value' + assert response.source_snapshot == 'source_snapshot_value' + assert response.source_snapshot_id == 'source_snapshot_id_value' + assert response.source_type == 'source_type_value' + assert response.status == 'status_value' + assert response.storage_locations == ['storage_locations_value'] + + +def test_get_from_family_rest_bad_request(transport: str = 'rest', request_type=compute.GetFromFamilyImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "family": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_from_family(request) + + +def test_get_from_family_rest_from_dict(): + test_get_from_family_rest(request_type=dict) + + +def test_get_from_family_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Image() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Image.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "family": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + family='family_value', + ) + mock_args.update(sample_request) + client.get_from_family(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/family/{family}" % client.transport._host, args[1]) + + +def test_get_from_family_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_from_family( + compute.GetFromFamilyImageRequest(), + project='project_value', + family='family_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyImageRequest(), + project='project_value', + resource='resource_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["image_resource"] = compute.Image(archive_size_bytes=1922) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["image_resource"] = compute.Image(archive_size_bytes=1922) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + image_resource=compute.Image(archive_size_bytes=1922), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertImageRequest(), + project='project_value', + image_resource=compute.Image(archive_size_bytes=1922), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListImagesRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ImageList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ImageList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListImagesRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ImageList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ImageList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListImagesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ImageList( + items=[ + compute.Image(), + compute.Image(), + compute.Image(), + ], + next_page_token='abc', + ), + compute.ImageList( + items=[], + next_page_token='def', + ), + compute.ImageList( + items=[ + compute.Image(), + ], + next_page_token='ghi', + ), + compute.ImageList( + items=[ + compute.Image(), + compute.Image(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ImageList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Image) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": 
"sample2"} + request_init["image_resource"] = compute.Image(archive_size_bytes=1922) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "image": "sample2"} + request_init["image_resource"] = compute.Image(archive_size_bytes=1922) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "image": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + image='image_value', + image_resource=compute.Image(archive_size_bytes=1922), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{image}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.patch( + compute.PatchImageRequest(), + project='project_value', + image='image_value', + image_resource=compute.Image(archive_size_bytes=1922), + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyImageRequest(), + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = 
client.set_labels(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{resource}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsImageRequest(), + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsImageRequest): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/images/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsImageRequest(), + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+    transport = transports.ImagesRestTransport( +        credentials=ga_credentials.AnonymousCredentials(), +    ) +    with pytest.raises(ValueError): +        client = ImagesClient( +            credentials=ga_credentials.AnonymousCredentials(), +            transport=transport, +        ) + +    # It is an error to provide a credentials file and a transport instance. +    transport = transports.ImagesRestTransport( +        credentials=ga_credentials.AnonymousCredentials(), +    ) +    with pytest.raises(ValueError): +        client = ImagesClient( +            client_options={"credentials_file": "credentials.json"}, +            transport=transport, +        ) + +    # It is an error to provide scopes and a transport instance. +    transport = transports.ImagesRestTransport( +        credentials=ga_credentials.AnonymousCredentials(), +    ) +    with pytest.raises(ValueError): +        client = ImagesClient( +            client_options={"scopes": ["1", "2"]}, +            transport=transport, +        ) + +    # NOTE(review): the three ValueError checks above cover the mutually +    # exclusive client options: explicit credentials, a credentials_file, and +    # scopes may not be combined with a ready-made transport instance. + + +def test_transport_instance(): +    # A client may be instantiated with a custom transport instance. +    transport = transports.ImagesRestTransport( +        credentials=ga_credentials.AnonymousCredentials(), +    ) +    client = ImagesClient(transport=transport) +    assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ +    transports.ImagesRestTransport, +]) +def test_transport_adc(transport_class): +    # Test default credentials are used if not provided. +    with mock.patch.object(google.auth, 'default') as adc: +        adc.return_value = (ga_credentials.AnonymousCredentials(), None) +        transport_class() +        adc.assert_called_once() + + +def test_images_base_transport_error(): +    # Passing both a credentials object and credentials_file should raise an error +    with pytest.raises(core_exceptions.DuplicateCredentialArgs): +        transport = transports.ImagesTransport( +            credentials=ga_credentials.AnonymousCredentials(), +            credentials_file="credentials.json" +        ) + + +def test_images_base_transport(): +    # Instantiate the base transport.
+ with mock.patch('google.cloud.compute_v1.services.images.transports.ImagesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ImagesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'deprecate', + 'get', + 'get_from_family', + 'get_iam_policy', + 'insert', + 'list', + 'patch', + 'set_iam_policy', + 'set_labels', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_images_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.images.transports.ImagesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ImagesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_images_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.images.transports.ImagesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ImagesTransport() + adc.assert_called_once() + + +def test_images_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ImagesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_images_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ImagesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_images_host_no_port(): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_images_host_with_port(): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = 
ImagesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ImagesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ImagesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ImagesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ImagesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ImagesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ImagesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ImagesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ImagesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ImagesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ImagesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ImagesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ImagesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ImagesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ImagesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ImagesTransport, '_prep_wrapped_messages') as prep: + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ImagesTransport, '_prep_wrapped_messages') as prep: + transport_class = ImagesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ImagesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_group_managers.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_group_managers.py new file mode 100644 index 000000000..7d14fd74d --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_group_managers.py @@ -0,0 +1,3821 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.instance_group_managers import InstanceGroupManagersClient +from google.cloud.compute_v1.services.instance_group_managers import pagers +from google.cloud.compute_v1.services.instance_group_managers import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + +# NOTE(review): the grpc / grpc_helpers / grpc_helpers_async imports look +# unused in this REST-only test module (only *RestTransport is exercised +# below) — generated boilerplate; confirm before removing. + + +# Test-only client certificate source: returns a static (cert_bytes, key_bytes) +# pair so mTLS code paths can be exercised without a real certificate. +def client_cert_source_callback(): +    return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): +    # "localhost" defaults have no distinct mTLS variant; substitute a routable +    # googleapis.com name so the mTLS-endpoint derivation below is observable. +    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +# _get_default_mtls_endpoint inserts ".mtls" after the service name for +# googleapis.com hosts, leaves already-mTLS hosts unchanged, and passes +# non-Google hosts (and None) through untouched. +def test__get_default_mtls_endpoint(): +    api_endpoint = "example.googleapis.com" +    api_mtls_endpoint = "example.mtls.googleapis.com" +    sandbox_endpoint = "example.sandbox.googleapis.com" +    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" +    non_googleapi = "api.example.com" + +    assert InstanceGroupManagersClient._get_default_mtls_endpoint(None) is None +    assert InstanceGroupManagersClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint +    assert InstanceGroupManagersClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint +    assert InstanceGroupManagersClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint +    assert InstanceGroupManagersClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint +    assert InstanceGroupManagersClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +# from_service_account_info must route the parsed info through the credentials +# factory and attach the resulting credentials to the (REST) transport. +@pytest.mark.parametrize("client_class", [ +    InstanceGroupManagersClient, +]) +def test_instance_group_managers_client_from_service_account_info(client_class): +    creds = ga_credentials.AnonymousCredentials() +    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: +        factory.return_value = creds +        info = {"valid": True} +        client = client_class.from_service_account_info(info) +        assert client.transport._credentials == creds +        assert isinstance(client, client_class) + +    assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ +    (transports.InstanceGroupManagersRestTransport, "rest"), +]) +def test_instance_group_managers_client_service_account_always_use_jwt(transport_class, transport_name): +    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: +        creds = service_account.Credentials(None,
None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + InstanceGroupManagersClient, +]) +def test_instance_group_managers_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_instance_group_managers_client_get_transport_class(): + transport = InstanceGroupManagersClient.get_transport_class() + available_transports = [ + transports.InstanceGroupManagersRestTransport, + ] + assert transport in available_transports + + transport = InstanceGroupManagersClient.get_transport_class("rest") + assert transport == transports.InstanceGroupManagersRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstanceGroupManagersClient, transports.InstanceGroupManagersRestTransport, "rest"), +]) +@mock.patch.object(InstanceGroupManagersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstanceGroupManagersClient)) +def test_instance_group_managers_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(InstanceGroupManagersClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(InstanceGroupManagersClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (InstanceGroupManagersClient, transports.InstanceGroupManagersRestTransport, "rest", "true"), + (InstanceGroupManagersClient, transports.InstanceGroupManagersRestTransport, "rest", "false"), +]) +@mock.patch.object(InstanceGroupManagersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstanceGroupManagersClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_instance_group_managers_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstanceGroupManagersClient, transports.InstanceGroupManagersRestTransport, "rest"), +]) +def test_instance_group_managers_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstanceGroupManagersClient, transports.InstanceGroupManagersRestTransport, "rest"), +]) +def test_instance_group_managers_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_abandon_instances_rest(transport: str = 'rest', request_type=compute.AbandonInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_abandon_instances_request_resource"] = compute.InstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # 
Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.abandon_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_abandon_instances_rest_bad_request(transport: str = 'rest', request_type=compute.AbandonInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_abandon_instances_request_resource"] = compute.InstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.abandon_instances(request) + + +def test_abandon_instances_rest_from_dict(): + test_abandon_instances_rest(request_type=dict) + + +def test_abandon_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_abandon_instances_request_resource=compute.InstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']), + ) + mock_args.update(sample_request) + client.abandon_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/abandonInstances" % client.transport._host, args[1]) + + +def test_abandon_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.abandon_instances( + compute.AbandonInstancesInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_abandon_instances_request_resource=compute.InstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']), + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroupManagerAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagerAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagerAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagerAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/instanceGroupManagers" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListInstanceGroupManagersRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupManagerAggregatedList( + items={ + 'a':compute.InstanceGroupManagersScopedList(), + 'b':compute.InstanceGroupManagersScopedList(), + 'c':compute.InstanceGroupManagersScopedList(), + }, + next_page_token='abc', + ), + compute.InstanceGroupManagerAggregatedList( + items={}, + next_page_token='def', + ), + compute.InstanceGroupManagerAggregatedList( + items={ + 'g':compute.InstanceGroupManagersScopedList(), + }, + next_page_token='ghi', + ), + compute.InstanceGroupManagerAggregatedList( + items={ + 'h':compute.InstanceGroupManagersScopedList(), + 'i':compute.InstanceGroupManagersScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupManagerAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in 
response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.InstanceGroupManagersScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.InstanceGroupManagersScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.InstanceGroupManagersScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_apply_updates_to_instances_rest(transport: str = 'rest', request_type=compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_apply_updates_request_resource"] = compute.InstanceGroupManagersApplyUpdatesRequest(all_instances=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.apply_updates_to_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_apply_updates_to_instances_rest_bad_request(transport: str = 'rest', request_type=compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_apply_updates_request_resource"] = compute.InstanceGroupManagersApplyUpdatesRequest(all_instances=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.apply_updates_to_instances(request) + + +def test_apply_updates_to_instances_rest_from_dict(): + test_apply_updates_to_instances_rest(request_type=dict) + + +def test_apply_updates_to_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_apply_updates_request_resource=compute.InstanceGroupManagersApplyUpdatesRequest(all_instances=True), + ) + mock_args.update(sample_request) + client.apply_updates_to_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/applyUpdatesToInstances" % client.transport._host, args[1]) + + +def test_apply_updates_to_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.apply_updates_to_instances( + compute.ApplyUpdatesToInstancesInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_apply_updates_request_resource=compute.InstanceGroupManagersApplyUpdatesRequest(all_instances=True), + ) + + +def test_create_instances_rest(transport: str = 'rest', request_type=compute.CreateInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_create_instances_request_resource"] = compute.InstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_create_instances_rest_bad_request(transport: str = 'rest', request_type=compute.CreateInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_create_instances_request_resource"] = compute.InstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_instances(request) + + +def test_create_instances_rest_from_dict(): + test_create_instances_rest(request_type=dict) + + +def test_create_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_create_instances_request_resource=compute.InstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + mock_args.update(sample_request) + client.create_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/createInstances" % client.transport._host, args[1]) + + +def test_create_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_instances( + compute.CreateInstancesInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_create_instances_request_resource=compute.InstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_delete_instances_rest(transport: str = 'rest', request_type=compute.DeleteInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_delete_instances_request_resource"] = compute.InstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_instances_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_delete_instances_request_resource"] = compute.InstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_instances(request) + + +def test_delete_instances_rest_from_dict(): + test_delete_instances_rest(request_type=dict) + + +def test_delete_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_delete_instances_request_resource=compute.InstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']), + ) + mock_args.update(sample_request) + client.delete_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/deleteInstances" % client.transport._host, args[1]) + + +def test_delete_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_instances( + compute.DeleteInstancesInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_delete_instances_request_resource=compute.InstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']), + ) + + +def test_delete_per_instance_configs_rest(transport: str = 'rest', request_type=compute.DeletePerInstanceConfigsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_delete_per_instance_configs_req_resource"] = compute.InstanceGroupManagersDeletePerInstanceConfigsReq(names=['names_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_per_instance_configs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.DeletePerInstanceConfigsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_delete_per_instance_configs_req_resource"] = compute.InstanceGroupManagersDeletePerInstanceConfigsReq(names=['names_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_per_instance_configs(request) + + +def test_delete_per_instance_configs_rest_from_dict(): + test_delete_per_instance_configs_rest(request_type=dict) + + +def test_delete_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_delete_per_instance_configs_req_resource=compute.InstanceGroupManagersDeletePerInstanceConfigsReq(names=['names_value']), + ) + mock_args.update(sample_request) + client.delete_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/deletePerInstanceConfigs" % client.transport._host, args[1]) + + +def test_delete_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_per_instance_configs( + compute.DeletePerInstanceConfigsInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_delete_per_instance_configs_req_resource=compute.InstanceGroupManagersDeletePerInstanceConfigsReq(names=['names_value']), + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroupManager( + base_instance_name='base_instance_name_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + instance_group='instance_group_value', + instance_template='instance_template_value', + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + target_pools=['target_pools_value'], + target_size=1185, + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManager.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.InstanceGroupManager) + assert response.base_instance_name == 'base_instance_name_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.instance_group == 'instance_group_value' + assert response.instance_template == 'instance_template_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.target_pools == ['target_pools_value'] + assert response.target_size == 1185 + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": 
"sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManager() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManager.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagerList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagerList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroupManagerList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagerList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListInstanceGroupManagersRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupManagerList( + items=[ + compute.InstanceGroupManager(), + compute.InstanceGroupManager(), + compute.InstanceGroupManager(), + ], + next_page_token='abc', + ), + compute.InstanceGroupManagerList( + items=[], + next_page_token='def', + ), + compute.InstanceGroupManagerList( + items=[ + compute.InstanceGroupManager(), + ], + next_page_token='ghi', + ), + compute.InstanceGroupManagerList( + items=[ + compute.InstanceGroupManager(), + compute.InstanceGroupManager(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupManagerList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceGroupManager) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_errors_rest(transport: str = 'rest', request_type=compute.ListErrorsInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagersListErrorsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagersListErrorsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_errors(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListErrorsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_errors_rest_bad_request(transport: str = 'rest', request_type=compute.ListErrorsInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_errors(request) + + +def test_list_errors_rest_from_dict(): + test_list_errors_rest(request_type=dict) + + +def test_list_errors_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagersListErrorsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagersListErrorsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.list_errors(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/listErrors" % client.transport._host, args[1]) + + +def test_list_errors_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_errors( + compute.ListErrorsInstanceGroupManagersRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_list_errors_rest_pager(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupManagersListErrorsResponse( + items=[ + compute.InstanceManagedByIgmError(), + compute.InstanceManagedByIgmError(), + compute.InstanceManagedByIgmError(), + ], + next_page_token='abc', + ), + compute.InstanceGroupManagersListErrorsResponse( + items=[], + next_page_token='def', + ), + compute.InstanceGroupManagersListErrorsResponse( + items=[ + compute.InstanceManagedByIgmError(), + ], + next_page_token='ghi', + ), + compute.InstanceGroupManagersListErrorsResponse( + items=[ + compute.InstanceManagedByIgmError(), + compute.InstanceManagedByIgmError(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupManagersListErrorsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + pager = client.list_errors(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceManagedByIgmError) + for i in results) + + pages = list(client.list_errors(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_managed_instances_rest(transport: str = 'rest', request_type=compute.ListManagedInstancesInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagersListManagedInstancesResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagersListManagedInstancesResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_managed_instances(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListManagedInstancesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_managed_instances_rest_bad_request(transport: str = 'rest', request_type=compute.ListManagedInstancesInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_managed_instances(request) + + +def test_list_managed_instances_rest_from_dict(): + test_list_managed_instances_rest(request_type=dict) + + +def test_list_managed_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagersListManagedInstancesResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagersListManagedInstancesResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.list_managed_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/listManagedInstances" % client.transport._host, args[1]) + + +def test_list_managed_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_managed_instances( + compute.ListManagedInstancesInstanceGroupManagersRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_list_managed_instances_rest_pager(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupManagersListManagedInstancesResponse( + managed_instances=[ + compute.ManagedInstance(), + compute.ManagedInstance(), + compute.ManagedInstance(), + ], + next_page_token='abc', + ), + compute.InstanceGroupManagersListManagedInstancesResponse( + managed_instances=[], + next_page_token='def', + ), + compute.InstanceGroupManagersListManagedInstancesResponse( + managed_instances=[ + compute.ManagedInstance(), + ], + next_page_token='ghi', + ), + compute.InstanceGroupManagersListManagedInstancesResponse( + managed_instances=[ + compute.ManagedInstance(), + compute.ManagedInstance(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupManagersListManagedInstancesResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + pager = client.list_managed_instances(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.ManagedInstance) + for i in results) + + pages = list(client.list_managed_instances(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_per_instance_configs_rest(transport: str = 'rest', request_type=compute.ListPerInstanceConfigsInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + 
request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagersListPerInstanceConfigsResp( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagersListPerInstanceConfigsResp.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_per_instance_configs(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPerInstanceConfigsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.ListPerInstanceConfigsInstanceGroupManagersRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_per_instance_configs(request) + + +def test_list_per_instance_configs_rest_from_dict(): + test_list_per_instance_configs_rest(request_type=dict) + + +def test_list_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManagersListPerInstanceConfigsResp() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManagersListPerInstanceConfigsResp.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.list_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/listPerInstanceConfigs" % client.transport._host, args[1]) + + +def test_list_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_per_instance_configs( + compute.ListPerInstanceConfigsInstanceGroupManagersRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_list_per_instance_configs_rest_pager(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove the commented-out transcode mock below unless there's a good reason to keep it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupManagersListPerInstanceConfigsResp( + items=[ + compute.PerInstanceConfig(), + compute.PerInstanceConfig(), + compute.PerInstanceConfig(), + ], + next_page_token='abc', + ), + compute.InstanceGroupManagersListPerInstanceConfigsResp( + items=[], + next_page_token='def', + ), + compute.InstanceGroupManagersListPerInstanceConfigsResp( + items=[ + compute.PerInstanceConfig(), + ], + next_page_token='ghi', + ), + compute.InstanceGroupManagersListPerInstanceConfigsResp( + items=[ + compute.PerInstanceConfig(), + compute.PerInstanceConfig(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupManagersListPerInstanceConfigsResp.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + pager = client.list_per_instance_configs(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.PerInstanceConfig) + for i in results) + + pages = list(client.list_per_instance_configs(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", 
"instance_group_manager": "sample3"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + + +def test_patch_per_instance_configs_rest(transport: str = 'rest', request_type=compute.PatchPerInstanceConfigsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_patch_per_instance_configs_req_resource"] = compute.InstanceGroupManagersPatchPerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch_per_instance_configs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.PatchPerInstanceConfigsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_patch_per_instance_configs_req_resource"] = compute.InstanceGroupManagersPatchPerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch_per_instance_configs(request) + + +def test_patch_per_instance_configs_rest_from_dict(): + test_patch_per_instance_configs_rest(request_type=dict) + + +def test_patch_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_patch_per_instance_configs_req_resource=compute.InstanceGroupManagersPatchPerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + mock_args.update(sample_request) + client.patch_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/patchPerInstanceConfigs" % client.transport._host, args[1]) + + +def test_patch_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch_per_instance_configs( + compute.PatchPerInstanceConfigsInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_patch_per_instance_configs_req_resource=compute.InstanceGroupManagersPatchPerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + + +def test_recreate_instances_rest(transport: str = 'rest', request_type=compute.RecreateInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_recreate_instances_request_resource"] = compute.InstanceGroupManagersRecreateInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.recreate_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_recreate_instances_rest_bad_request(transport: str = 'rest', request_type=compute.RecreateInstancesInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_recreate_instances_request_resource"] = compute.InstanceGroupManagersRecreateInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.recreate_instances(request) + + +def test_recreate_instances_rest_from_dict(): + test_recreate_instances_rest(request_type=dict) + + +def test_recreate_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_recreate_instances_request_resource=compute.InstanceGroupManagersRecreateInstancesRequest(instances=['instances_value']), + ) + mock_args.update(sample_request) + client.recreate_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/recreateInstances" % client.transport._host, args[1]) + + +def test_recreate_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.recreate_instances( + compute.RecreateInstancesInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_recreate_instances_request_resource=compute.InstanceGroupManagersRecreateInstancesRequest(instances=['instances_value']), + ) + + +def test_resize_rest(transport: str = 'rest', request_type=compute.ResizeInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.resize(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_resize_rest_bad_request(transport: str = 'rest', request_type=compute.ResizeInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.resize(request) + + +def test_resize_rest_from_dict(): + test_resize_rest(request_type=dict) + + +def test_resize_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + size=443, + ) + mock_args.update(sample_request) + client.resize(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/resize" % client.transport._host, args[1]) + + +def test_resize_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resize( + compute.ResizeInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + size=443, + ) + + +def test_set_instance_template_rest(transport: str = 'rest', request_type=compute.SetInstanceTemplateInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_set_instance_template_request_resource"] = compute.InstanceGroupManagersSetInstanceTemplateRequest(instance_template='instance_template_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_instance_template(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_instance_template_rest_bad_request(transport: str = 'rest', request_type=compute.SetInstanceTemplateInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_set_instance_template_request_resource"] = compute.InstanceGroupManagersSetInstanceTemplateRequest(instance_template='instance_template_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_instance_template(request) + + +def test_set_instance_template_rest_from_dict(): + test_set_instance_template_rest(request_type=dict) + + +def test_set_instance_template_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_set_instance_template_request_resource=compute.InstanceGroupManagersSetInstanceTemplateRequest(instance_template='instance_template_value'), + ) + mock_args.update(sample_request) + client.set_instance_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/setInstanceTemplate" % client.transport._host, args[1]) + + +def test_set_instance_template_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_instance_template( + compute.SetInstanceTemplateInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_set_instance_template_request_resource=compute.InstanceGroupManagersSetInstanceTemplateRequest(instance_template='instance_template_value'), + ) + + +def test_set_target_pools_rest(transport: str = 'rest', request_type=compute.SetTargetPoolsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_set_target_pools_request_resource"] = compute.InstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_target_pools(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_target_pools_rest_bad_request(transport: str = 'rest', request_type=compute.SetTargetPoolsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_set_target_pools_request_resource"] = compute.InstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_target_pools(request) + + +def test_set_target_pools_rest_from_dict(): + test_set_target_pools_rest(request_type=dict) + + +def test_set_target_pools_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_set_target_pools_request_resource=compute.InstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_target_pools(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/setTargetPools" % client.transport._host, args[1]) + + +def test_set_target_pools_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_target_pools( + compute.SetTargetPoolsInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_set_target_pools_request_resource=compute.InstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value'), + ) + + +def test_update_per_instance_configs_rest(transport: str = 'rest', request_type=compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_update_per_instance_configs_req_resource"] = compute.InstanceGroupManagersUpdatePerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_per_instance_configs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_managers_update_per_instance_configs_req_resource"] = compute.InstanceGroupManagersUpdatePerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_per_instance_configs(request) + + +def test_update_per_instance_configs_rest_from_dict(): + test_update_per_instance_configs_rest(request_type=dict) + + +def test_update_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_update_per_instance_configs_req_resource=compute.InstanceGroupManagersUpdatePerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + mock_args.update(sample_request) + client.update_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroupManagers/{instance_group_manager}/updatePerInstanceConfigs" % client.transport._host, args[1]) + + +def test_update_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_per_instance_configs( + compute.UpdatePerInstanceConfigsInstanceGroupManagerRequest(), + project='project_value', + zone='zone_value', + instance_group_manager='instance_group_manager_value', + instance_group_managers_update_per_instance_configs_req_resource=compute.InstanceGroupManagersUpdatePerInstanceConfigsReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.InstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.InstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceGroupManagersClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.InstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceGroupManagersClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.InstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = InstanceGroupManagersClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.InstanceGroupManagersRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_instance_group_managers_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.InstanceGroupManagersTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_instance_group_managers_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.instance_group_managers.transports.InstanceGroupManagersTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.InstanceGroupManagersTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'abandon_instances', + 'aggregated_list', + 'apply_updates_to_instances', + 'create_instances', + 'delete', + 'delete_instances', + 'delete_per_instance_configs', + 'get', + 'insert', + 'list', + 'list_errors', + 'list_managed_instances', + 'list_per_instance_configs', + 'patch', + 'patch_per_instance_configs', + 'recreate_instances', + 'resize', + 'set_instance_template', + 'set_target_pools', + 'update_per_instance_configs', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_instance_group_managers_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.instance_group_managers.transports.InstanceGroupManagersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InstanceGroupManagersTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_instance_group_managers_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.instance_group_managers.transports.InstanceGroupManagersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InstanceGroupManagersTransport() + adc.assert_called_once() + + +def test_instance_group_managers_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + InstanceGroupManagersClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_instance_group_managers_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.InstanceGroupManagersRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_instance_group_managers_host_no_port(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_instance_group_managers_host_with_port(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def 
test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = InstanceGroupManagersClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = InstanceGroupManagersClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceGroupManagersClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = InstanceGroupManagersClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = InstanceGroupManagersClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceGroupManagersClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = InstanceGroupManagersClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = InstanceGroupManagersClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InstanceGroupManagersClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = InstanceGroupManagersClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = InstanceGroupManagersClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceGroupManagersClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = InstanceGroupManagersClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = InstanceGroupManagersClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InstanceGroupManagersClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.InstanceGroupManagersTransport, '_prep_wrapped_messages') as prep: + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.InstanceGroupManagersTransport, '_prep_wrapped_messages') as prep: + transport_class = InstanceGroupManagersClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = InstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_groups.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_groups.py new file mode 100644 index 000000000..e84567dc2 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_groups.py @@ -0,0 +1,2084 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.instance_groups import InstanceGroupsClient +from google.cloud.compute_v1.services.instance_groups import pagers +from google.cloud.compute_v1.services.instance_groups import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert InstanceGroupsClient._get_default_mtls_endpoint(None) is None + assert InstanceGroupsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert InstanceGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert InstanceGroupsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert InstanceGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert InstanceGroupsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + InstanceGroupsClient, +]) +def test_instance_groups_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.InstanceGroupsRestTransport, "rest"), +]) +def test_instance_groups_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + InstanceGroupsClient, +]) +def test_instance_groups_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_instance_groups_client_get_transport_class(): + transport = InstanceGroupsClient.get_transport_class() + available_transports = [ + transports.InstanceGroupsRestTransport, + ] + assert transport in available_transports + + transport = InstanceGroupsClient.get_transport_class("rest") + assert transport == transports.InstanceGroupsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstanceGroupsClient, transports.InstanceGroupsRestTransport, "rest"), +]) +@mock.patch.object(InstanceGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstanceGroupsClient)) +def test_instance_groups_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(InstanceGroupsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(InstanceGroupsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (InstanceGroupsClient, transports.InstanceGroupsRestTransport, "rest", "true"), + (InstanceGroupsClient, transports.InstanceGroupsRestTransport, "rest", "false"), +]) +@mock.patch.object(InstanceGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstanceGroupsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_instance_groups_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstanceGroupsClient, transports.InstanceGroupsRestTransport, "rest"), +]) +def test_instance_groups_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstanceGroupsClient, transports.InstanceGroupsRestTransport, "rest"), +]) +def test_instance_groups_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_instances_rest(transport: str = 'rest', request_type=compute.AddInstancesInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_add_instances_request_resource"] = compute.InstanceGroupsAddInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the 
method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_instances_rest_bad_request(transport: str = 'rest', request_type=compute.AddInstancesInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_add_instances_request_resource"] = compute.InstanceGroupsAddInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_instances(request) + + +def test_add_instances_rest_from_dict(): + test_add_instances_rest(request_type=dict) + + +def test_add_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_add_instances_request_resource=compute.InstanceGroupsAddInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + mock_args.update(sample_request) + client.add_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/addInstances" % client.transport._host, args[1]) + + +def test_add_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_instances( + compute.AddInstancesInstanceGroupRequest(), + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_add_instances_request_resource=compute.InstanceGroupsAddInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListInstanceGroupsRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroupAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListInstanceGroupsRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/instanceGroups" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListInstanceGroupsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupAggregatedList( + items={ + 'a':compute.InstanceGroupsScopedList(), + 'b':compute.InstanceGroupsScopedList(), + 'c':compute.InstanceGroupsScopedList(), + }, + next_page_token='abc', + ), + compute.InstanceGroupAggregatedList( + items={}, + next_page_token='def', + ), + compute.InstanceGroupAggregatedList( + items={ + 'g':compute.InstanceGroupsScopedList(), + }, + next_page_token='ghi', + ), + compute.InstanceGroupAggregatedList( + items={ + 'h':compute.InstanceGroupsScopedList(), + 'i':compute.InstanceGroupsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.InstanceGroupsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for 
t in result) == (str, compute.InstanceGroupsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.InstanceGroupsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the 
response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteInstanceGroupRequest(), + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroup( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + network='network_value', + region='region_value', + self_link='self_link_value', + size=443, + subnetwork='subnetwork_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroup.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.InstanceGroup) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.size == 443 + assert response.subnetwork == 'subnetwork_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroup() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroup.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetInstanceGroupRequest(), + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["instance_group_resource"] = compute.InstanceGroup(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["instance_group_resource"] = compute.InstanceGroup(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group_resource=compute.InstanceGroup(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertInstanceGroupRequest(), + project='project_value', + zone='zone_value', + instance_group_resource=compute.InstanceGroup(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListInstanceGroupsRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListInstanceGroupsRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroupList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListInstanceGroupsRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupList( + items=[ + compute.InstanceGroup(), + compute.InstanceGroup(), + compute.InstanceGroup(), + ], + next_page_token='abc', + ), + compute.InstanceGroupList( + items=[], + next_page_token='def', + ), + compute.InstanceGroupList( + items=[ + compute.InstanceGroup(), + ], + next_page_token='ghi', + ), + compute.InstanceGroupList( + items=[ + compute.InstanceGroup(), + compute.InstanceGroup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceGroup) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_instances_rest(transport: str = 'rest', request_type=compute.ListInstancesInstanceGroupsRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_list_instances_request_resource"] = compute.InstanceGroupsListInstancesRequest(instance_state='instance_state_value') + request = request_type(request_init) + + # Mock the http request call within the method and 
fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupsListInstances( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupsListInstances.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_instances(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListInstancesPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_instances_rest_bad_request(transport: str = 'rest', request_type=compute.ListInstancesInstanceGroupsRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_list_instances_request_resource"] = compute.InstanceGroupsListInstancesRequest(instance_state='instance_state_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_instances(request) + + +def test_list_instances_rest_from_dict(): + test_list_instances_rest(request_type=dict) + + +def test_list_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupsListInstances() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupsListInstances.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_list_instances_request_resource=compute.InstanceGroupsListInstancesRequest(instance_state='instance_state_value'), + ) + mock_args.update(sample_request) + client.list_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/listInstances" % client.transport._host, args[1]) + + +def test_list_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_instances( + compute.ListInstancesInstanceGroupsRequest(), + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_list_instances_request_resource=compute.InstanceGroupsListInstancesRequest(instance_state='instance_state_value'), + ) + + +def test_list_instances_rest_pager(): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceGroupsListInstances( + items=[ + compute.InstanceWithNamedPorts(), + compute.InstanceWithNamedPorts(), + compute.InstanceWithNamedPorts(), + ], + next_page_token='abc', + ), + compute.InstanceGroupsListInstances( + items=[], + next_page_token='def', + ), + compute.InstanceGroupsListInstances( + items=[ + compute.InstanceWithNamedPorts(), + ], + next_page_token='ghi', + ), + compute.InstanceGroupsListInstances( + items=[ + compute.InstanceWithNamedPorts(), + compute.InstanceWithNamedPorts(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceGroupsListInstances.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + sample_request["instance_groups_list_instances_request_resource"] = compute.InstanceGroupsListInstancesRequest(instance_state='instance_state_value') + + pager = client.list_instances(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceWithNamedPorts) + for i in results) + + pages = list(client.list_instances(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_remove_instances_rest(transport: str = 'rest', request_type=compute.RemoveInstancesInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = 
{"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_remove_instances_request_resource"] = compute.InstanceGroupsRemoveInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_instances_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveInstancesInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_remove_instances_request_resource"] = compute.InstanceGroupsRemoveInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_instances(request) + + +def test_remove_instances_rest_from_dict(): + test_remove_instances_rest(request_type=dict) + + +def test_remove_instances_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_remove_instances_request_resource=compute.InstanceGroupsRemoveInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + mock_args.update(sample_request) + client.remove_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/removeInstances" % client.transport._host, args[1]) + + +def test_remove_instances_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_instances( + compute.RemoveInstancesInstanceGroupRequest(), + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_remove_instances_request_resource=compute.InstanceGroupsRemoveInstancesRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + + +def test_set_named_ports_rest(transport: str = 'rest', request_type=compute.SetNamedPortsInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_set_named_ports_request_resource"] = compute.InstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_named_ports(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_named_ports_rest_bad_request(transport: str = 'rest', request_type=compute.SetNamedPortsInstanceGroupRequest): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + request_init["instance_groups_set_named_ports_request_resource"] = compute.InstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_named_ports(request) + + +def test_set_named_ports_rest_from_dict(): + test_set_named_ports_rest(request_type=dict) + + +def test_set_named_ports_rest_flattened(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_set_named_ports_request_resource=compute.InstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_named_ports(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instanceGroups/{instance_group}/setNamedPorts" % client.transport._host, args[1]) + + +def test_set_named_ports_rest_flattened_error(transport: str = 'rest'): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_named_ports( + compute.SetNamedPortsInstanceGroupRequest(), + project='project_value', + zone='zone_value', + instance_group='instance_group_value', + instance_groups_set_named_ports_request_resource=compute.InstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.InstanceGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.InstanceGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceGroupsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.InstanceGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceGroupsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+    transport = transports.InstanceGroupsRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = InstanceGroupsClient(transport=transport)
+    assert client.transport is transport
+
+
+@pytest.mark.parametrize("transport_class", [
+    transports.InstanceGroupsRestTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+
+def test_instance_groups_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error.
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.InstanceGroupsTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_instance_groups_base_transport():
+    # Instantiate the base transport.
+    with mock.patch('google.cloud.compute_v1.services.instance_groups.transports.InstanceGroupsTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.InstanceGroupsTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'add_instances',
+        'aggregated_list',
+        'delete',
+        'get',
+        'insert',
+        'list',
+        'list_instances',
+        'remove_instances',
+        'set_named_ports',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
+
+def test_instance_groups_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file.
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.instance_groups.transports.InstanceGroupsTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.InstanceGroupsTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+            'https://www.googleapis.com/auth/compute',
+            'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id="octopus",
+        )
+
+
+def test_instance_groups_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.instance_groups.transports.InstanceGroupsTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.InstanceGroupsTransport()
+        adc.assert_called_once()
+
+
+def test_instance_groups_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        InstanceGroupsClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+            'https://www.googleapis.com/auth/compute',
+            'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id=None,
+        )
+
+
+def test_instance_groups_http_transport_client_cert_source_for_mtls():
+    cred = ga_credentials.AnonymousCredentials()
+    with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
+        transports.InstanceGroupsRestTransport (
+            credentials=cred,
+            client_cert_source_for_mtls=client_cert_source_callback
+        )
+        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+def test_instance_groups_host_no_port():
+    client = InstanceGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:443'
+
+
+def test_instance_groups_host_with_port():
+    client = InstanceGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:8000'
+
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+    actual = InstanceGroupsClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = InstanceGroupsClient.common_billing_account_path(**expected)
+
+    # Check that parsing inverts the path construction.
+    actual = InstanceGroupsClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = InstanceGroupsClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = InstanceGroupsClient.common_folder_path(**expected)
+
+    # Check that parsing inverts the path construction.
+    actual = InstanceGroupsClient.parse_common_folder_path(path)
+    assert expected == actual
+
+def test_common_organization_path():
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization, )
+    actual = InstanceGroupsClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = InstanceGroupsClient.common_organization_path(**expected)
+
+    # Check that parsing inverts the path construction.
+    actual = InstanceGroupsClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = InstanceGroupsClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = InstanceGroupsClient.common_project_path(**expected)
+
+    # Check that parsing inverts the path construction.
+    actual = InstanceGroupsClient.parse_common_project_path(path)
+    assert expected == actual
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = InstanceGroupsClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = InstanceGroupsClient.common_location_path(**expected)
+
+    # Check that parsing inverts the path construction.
+    actual = InstanceGroupsClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.InstanceGroupsTransport, '_prep_wrapped_messages') as prep:
+        client = InstanceGroupsClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.InstanceGroupsTransport, '_prep_wrapped_messages') as prep:
+        transport_class = InstanceGroupsClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+
+def test_transport_close():
+    transports = {
+        "rest": "_session",
+    }
+
+    for transport, close_name in transports.items():
+        client = InstanceGroupsClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+def test_client_ctx():
+    transports = [
+        'rest',
+    ]
+    for transport in transports:
+        client = InstanceGroupsClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_templates.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_templates.py
new file mode 100644
index 000000000..b079d97ed
--- /dev/null
+++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instance_templates.py
@@ -0,0 +1,1571 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.instance_templates import InstanceTemplatesClient
+from google.cloud.compute_v1.services.instance_templates import pagers
+from google.cloud.compute_v1.services.instance_templates import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert InstanceTemplatesClient._get_default_mtls_endpoint(None) is None
+    assert InstanceTemplatesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert InstanceTemplatesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert InstanceTemplatesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert InstanceTemplatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert InstanceTemplatesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+    InstanceTemplatesClient,
+])
+def test_instance_templates_client_from_service_account_info(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+    (transports.InstanceTemplatesRestTransport, "rest"),
+])
+def test_instance_templates_client_service_account_always_use_jwt(transport_class, transport_name):
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=True)
+        use_jwt.assert_called_once_with(True)
+
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=False)
+        use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize("client_class", [
+    InstanceTemplatesClient,
+])
+def test_instance_templates_client_from_service_account_file(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
+        factory.return_value = creds
+        client = client_class.from_service_account_file("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        client = client_class.from_service_account_json("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+def test_instance_templates_client_get_transport_class():
+    transport = InstanceTemplatesClient.get_transport_class()
+    available_transports = [
+        transports.InstanceTemplatesRestTransport,
+    ]
+    assert transport in available_transports
+
+    transport = InstanceTemplatesClient.get_transport_class("rest")
+    assert transport == transports.InstanceTemplatesRestTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),
+])
+@mock.patch.object(InstanceTemplatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstanceTemplatesClient))
+def test_instance_templates_client_client_options(client_class, transport_class, transport_name):
+    # Check that if channel is provided we won't create a new one.
+    with mock.patch.object(InstanceTemplatesClient, 'get_transport_class') as gtc:
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if channel is provided via str we will create a new one.
+    with mock.patch.object(InstanceTemplatesClient, 'get_transport_class') as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name)
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "always".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name)
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_MTLS_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+    # unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+        with pytest.raises(MutualTLSChannelError):
+            client = client_class()
+
+    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
+        with pytest.raises(ValueError):
+            client = client_class()
+
+    # Check the case quota_project_id is provided.
+    options = client_options.ClientOptions(quota_project_id="octopus")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id="octopus",
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
+    (InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest", "true"),
+    (InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest", "false"),
+])
+@mock.patch.object(InstanceTemplatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstanceTemplatesClient))
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_instance_templates_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
+    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+    # Check the case client_cert_source is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name, client_options=options)
+
+            if use_client_cert_env == "false":
+                expected_client_cert_source = None
+                expected_host = client.DEFAULT_ENDPOINT
+            else:
+                expected_client_cert_source = client_cert_source_callback
+                expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=expected_host,
+                scopes=None,
+                client_cert_source_for_mtls=expected_client_cert_source,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case ADC client cert is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
+                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
+                    if use_client_cert_env == "false":
+                        expected_host = client.DEFAULT_ENDPOINT
+                        expected_client_cert_source = None
+                    else:
+                        expected_host = client.DEFAULT_MTLS_ENDPOINT
+                        expected_client_cert_source = client_cert_source_callback
+
+                    patched.return_value = None
+                    client = client_class(transport=transport_name)
+                    patched.assert_called_once_with(
+                        credentials=None,
+                        credentials_file=None,
+                        host=expected_host,
+                        scopes=None,
+                        client_cert_source_for_mtls=expected_client_cert_source,
+                        quota_project_id=None,
+                        client_info=transports.base.DEFAULT_CLIENT_INFO,
+                        always_use_jwt_access=True,
+                    )
+
+    # Check the case client_cert_source and ADC client cert are not provided.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
+                patched.return_value = None
+                client = client_class(transport=transport_name)
+                patched.assert_called_once_with(
+                    credentials=None,
+                    credentials_file=None,
+                    host=client.DEFAULT_ENDPOINT,
+                    scopes=None,
+                    client_cert_source_for_mtls=None,
+                    quota_project_id=None,
+                    client_info=transports.base.DEFAULT_CLIENT_INFO,
+                    always_use_jwt_access=True,
+                )
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),
+])
+def test_instance_templates_client_client_options_scopes(client_class, transport_class, transport_name):
+    # Check the case scopes are provided.
+    options = client_options.ClientOptions(
+        scopes=["1", "2"],
+    )
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=["1", "2"],
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (InstanceTemplatesClient, transports.InstanceTemplatesRestTransport, "rest"),
+])
+def test_instance_templates_client_client_options_credentials_file(client_class, transport_class, transport_name):
+    # Check the case credentials file is provided.
+    options = client_options.ClientOptions(
+        credentials_file="credentials.json"
+    )
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file="credentials.json",
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+
+def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteInstanceTemplateRequest):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Send a request that will satisfy transcoding.
+    request_init = {"project": "sample1", "instance_template": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.delete(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInstanceTemplateRequest):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Send a request that will satisfy transcoding.
+    request_init = {"project": "sample1", "instance_template": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete(request)
+
+
+def test_delete_rest_from_dict():
+    test_delete_rest(request_type=dict)
+
+
+def test_delete_rest_flattened(transport: str = 'rest'):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # Get path-parameter arguments that satisfy an http rule for this method.
+        sample_request = {"project": "sample1", "instance_template": "sample2"}
+
+        # Get a truthy value for each flattened field.
+        mock_args = dict(
+            project='project_value',
+            instance_template='instance_template_value',
+        )
+        mock_args.update(sample_request)
+        client.delete(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/instanceTemplates/{instance_template}" % client.transport._host, args[1])
+
+
+def test_delete_rest_flattened_error(transport: str = 'rest'):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete(
+            compute.DeleteInstanceTemplateRequest(),
+            project='project_value',
+            instance_template='instance_template_value',
+        )
+
+
+def test_get_rest(transport: str = 'rest', request_type=compute.GetInstanceTemplateRequest):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Send a request that will satisfy transcoding.
+    request_init = {"project": "sample1", "instance_template": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.InstanceTemplate(
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            id=205,
+            kind='kind_value',
+            name='name_value',
+            self_link='self_link_value',
+            source_instance='source_instance_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.InstanceTemplate.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.get(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.InstanceTemplate)
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.id == 205
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.self_link == 'self_link_value'
+    assert response.source_instance == 'source_instance_value'
+
+
+def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetInstanceTemplateRequest):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Send a request that will satisfy transcoding.
+    request_init = {"project": "sample1", "instance_template": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get(request)
+
+
+def test_get_rest_from_dict():
+    test_get_rest(request_type=dict)
+
+
+def test_get_rest_flattened(transport: str = 'rest'):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.InstanceTemplate()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.InstanceTemplate.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # Get path-parameter arguments that satisfy an http rule for this method.
+        sample_request = {"project": "sample1", "instance_template": "sample2"}
+
+        # Get a truthy value for each flattened field.
+        mock_args = dict(
+            project='project_value',
+            instance_template='instance_template_value',
+        )
+        mock_args.update(sample_request)
+        client.get(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/instanceTemplates/{instance_template}" % client.transport._host, args[1])
+
+
+def test_get_rest_flattened_error(transport: str = 'rest'):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get(
+            compute.GetInstanceTemplateRequest(),
+            project='project_value',
+            instance_template='instance_template_value',
+        )
+
+
+def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyInstanceTemplateRequest):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Send a request that will satisfy transcoding.
+    request_init = {"project": "sample1", "resource": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Policy(
+            etag='etag_value',
+            iam_owned=True,
+            version=774,
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Policy.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.get_iam_policy(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Policy)
+    assert response.etag == 'etag_value'
+    assert response.iam_owned is True
+    assert response.version == 774
+
+
+def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyInstanceTemplateRequest):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Send a request that will satisfy transcoding.
+    request_init = {"project": "sample1", "resource": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get_iam_policy(request)
+
+
+def test_get_iam_policy_rest_from_dict():
+    test_get_iam_policy_rest(request_type=dict)
+
+
+def test_get_iam_policy_rest_flattened(transport: str = 'rest'):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Policy()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Policy.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # Get path-parameter arguments that satisfy an http rule for this method.
+        sample_request = {"project": "sample1", "resource": "sample2"}
+
+        # Get a truthy value for each flattened field.
+        mock_args = dict(
+            project='project_value',
+            resource='resource_value',
+        )
+        mock_args.update(sample_request)
+        client.get_iam_policy(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/instanceTemplates/{resource}/getIamPolicy" % client.transport._host, args[1])
+
+
+def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_iam_policy(
+            compute.GetIamPolicyInstanceTemplateRequest(),
+            project='project_value',
+            resource='resource_value',
+        )
+
+
+def test_insert_rest(transport: str = 'rest', request_type=compute.InsertInstanceTemplateRequest):
+    client = InstanceTemplatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Send a request that will satisfy transcoding.
+    request_init = {"project": "sample1"}
+    request_init["instance_template_resource"] = compute.InstanceTemplate(creation_timestamp='creation_timestamp_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertInstanceTemplateRequest): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["instance_template_resource"] = compute.InstanceTemplate(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + instance_template_resource=compute.InstanceTemplate(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/instanceTemplates" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertInstanceTemplateRequest(), + project='project_value', + instance_template_resource=compute.InstanceTemplate(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListInstanceTemplatesRequest): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceTemplateList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceTemplateList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListInstanceTemplatesRequest): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceTemplateList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceTemplateList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/instanceTemplates" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListInstanceTemplatesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceTemplateList( + items=[ + compute.InstanceTemplate(), + compute.InstanceTemplate(), + compute.InstanceTemplate(), + ], + next_page_token='abc', + ), + compute.InstanceTemplateList( + items=[], + next_page_token='def', + ), + compute.InstanceTemplateList( + items=[ + compute.InstanceTemplate(), + ], + next_page_token='ghi', + ), + compute.InstanceTemplateList( + items=[ + compute.InstanceTemplate(), + compute.InstanceTemplate(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceTemplateList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceTemplate) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyInstanceTemplateRequest): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a 
response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyInstanceTemplateRequest): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/instanceTemplates/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyInstanceTemplateRequest(), + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsInstanceTemplateRequest): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsInstanceTemplateRequest): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/instanceTemplates/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsInstanceTemplateRequest(), + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.InstanceTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.InstanceTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceTemplatesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.InstanceTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstanceTemplatesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.InstanceTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = InstanceTemplatesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.InstanceTemplatesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_instance_templates_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.InstanceTemplatesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_instance_templates_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.InstanceTemplatesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_instance_templates_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InstanceTemplatesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 
'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_instance_templates_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.instance_templates.transports.InstanceTemplatesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InstanceTemplatesTransport() + adc.assert_called_once() + + +def test_instance_templates_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + InstanceTemplatesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_instance_templates_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.InstanceTemplatesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_instance_templates_host_no_port(): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_instance_templates_host_with_port(): + client = InstanceTemplatesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = InstanceTemplatesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = InstanceTemplatesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceTemplatesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = InstanceTemplatesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = InstanceTemplatesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceTemplatesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = InstanceTemplatesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = InstanceTemplatesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InstanceTemplatesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = InstanceTemplatesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = InstanceTemplatesClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = InstanceTemplatesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = InstanceTemplatesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = InstanceTemplatesClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InstanceTemplatesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.InstanceTemplatesTransport, '_prep_wrapped_messages') as prep: + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.InstanceTemplatesTransport, '_prep_wrapped_messages') as prep: + transport_class = InstanceTemplatesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = InstanceTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instances.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instances.py new file mode 100644 index 000000000..6f71c69ad --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_instances.py @@ -0,0 +1,7007 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.instances import InstancesClient +from google.cloud.compute_v1.services.instances import pagers +from google.cloud.compute_v1.services.instances import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert InstancesClient._get_default_mtls_endpoint(None) is None + assert InstancesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert InstancesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert InstancesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert InstancesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert InstancesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + InstancesClient, +]) +def test_instances_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.InstancesRestTransport, "rest"), +]) +def test_instances_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) 
+ + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + InstancesClient, +]) +def test_instances_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_instances_client_get_transport_class(): + transport = InstancesClient.get_transport_class() + available_transports = [ + transports.InstancesRestTransport, + ] + assert transport in available_transports + + transport = InstancesClient.get_transport_class("rest") + assert transport == transports.InstancesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstancesClient, transports.InstancesRestTransport, "rest"), +]) +@mock.patch.object(InstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstancesClient)) +def test_instances_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(InstancesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(InstancesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (InstancesClient, transports.InstancesRestTransport, "rest", "true"), + (InstancesClient, transports.InstancesRestTransport, "rest", "false"), +]) +@mock.patch.object(InstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InstancesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_instances_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstancesClient, transports.InstancesRestTransport, "rest"), +]) +def test_instances_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InstancesClient, transports.InstancesRestTransport, "rest"), +]) +def test_instances_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_access_config_rest(transport: str = 'rest', request_type=compute.AddAccessConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["access_config_resource"] = compute.AccessConfig(external_ipv6='external_ipv6_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_access_config(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_access_config_rest_bad_request(transport: str = 'rest', request_type=compute.AddAccessConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["access_config_resource"] = compute.AccessConfig(external_ipv6='external_ipv6_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_access_config(request) + + +def test_add_access_config_rest_from_dict(): + test_add_access_config_rest(request_type=dict) + + +def test_add_access_config_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + access_config_resource=compute.AccessConfig(external_ipv6='external_ipv6_value'), + ) + mock_args.update(sample_request) + client.add_access_config(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/addAccessConfig" % client.transport._host, args[1]) + + +def test_add_access_config_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_access_config( + compute.AddAccessConfigInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + access_config_resource=compute.AccessConfig(external_ipv6='external_ipv6_value'), + ) + + +def test_add_resource_policies_rest(transport: str = 'rest', request_type=compute.AddResourcePoliciesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_add_resource_policies_request_resource"] = compute.InstancesAddResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_resource_policies(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_resource_policies_rest_bad_request(transport: str = 'rest', request_type=compute.AddResourcePoliciesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_add_resource_policies_request_resource"] = compute.InstancesAddResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_resource_policies(request) + + +def test_add_resource_policies_rest_from_dict(): + test_add_resource_policies_rest(request_type=dict) + + +def test_add_resource_policies_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_add_resource_policies_request_resource=compute.InstancesAddResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + mock_args.update(sample_request) + client.add_resource_policies(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/addResourcePolicies" % client.transport._host, args[1]) + + +def test_add_resource_policies_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_resource_policies( + compute.AddResourcePoliciesInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_add_resource_policies_request_resource=compute.InstancesAddResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListInstancesRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListInstancesRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/instances" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListInstancesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceAggregatedList( + items={ + 'a':compute.InstancesScopedList(), + 'b':compute.InstancesScopedList(), + 'c':compute.InstancesScopedList(), + }, + next_page_token='abc', + ), + compute.InstanceAggregatedList( + items={}, + next_page_token='def', + ), + compute.InstanceAggregatedList( + items={ + 'g':compute.InstancesScopedList(), + }, + next_page_token='ghi', + ), + compute.InstanceAggregatedList( + items={ + 'h':compute.InstancesScopedList(), + 'i':compute.InstancesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.InstancesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.InstancesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.InstancesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_attach_disk_rest(transport: str = 'rest', request_type=compute.AttachDiskInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["attached_disk_resource"] = compute.AttachedDisk(auto_delete=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.attach_disk(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_attach_disk_rest_bad_request(transport: str = 'rest', request_type=compute.AttachDiskInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["attached_disk_resource"] = compute.AttachedDisk(auto_delete=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.attach_disk(request) + + +def test_attach_disk_rest_from_dict(): + test_attach_disk_rest(request_type=dict) + + +def test_attach_disk_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + attached_disk_resource=compute.AttachedDisk(auto_delete=True), + ) + mock_args.update(sample_request) + client.attach_disk(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/attachDisk" % client.transport._host, args[1]) + + +def test_attach_disk_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.attach_disk( + compute.AttachDiskInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + attached_disk_resource=compute.AttachedDisk(auto_delete=True), + ) + + +def test_bulk_insert_rest(transport: str = 'rest', request_type=compute.BulkInsertInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["bulk_insert_instance_resource_resource"] = compute.BulkInsertInstanceResource(count=553) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.bulk_insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_bulk_insert_rest_bad_request(transport: str = 'rest', request_type=compute.BulkInsertInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["bulk_insert_instance_resource_resource"] = compute.BulkInsertInstanceResource(count=553) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.bulk_insert(request) + + +def test_bulk_insert_rest_from_dict(): + test_bulk_insert_rest(request_type=dict) + + +def test_bulk_insert_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + bulk_insert_instance_resource_resource=compute.BulkInsertInstanceResource(count=553), + ) + mock_args.update(sample_request) + client.bulk_insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/bulkInsert" % client.transport._host, args[1]) + + +def test_bulk_insert_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.bulk_insert( + compute.BulkInsertInstanceRequest(), + project='project_value', + zone='zone_value', + bulk_insert_instance_resource_resource=compute.BulkInsertInstanceResource(count=553), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_delete_access_config_rest(transport: str = 'rest', request_type=compute.DeleteAccessConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_access_config(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_access_config_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteAccessConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_access_config(request) + + +def test_delete_access_config_rest_from_dict(): + test_delete_access_config_rest(request_type=dict) + + +def test_delete_access_config_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + access_config='access_config_value', + network_interface='network_interface_value', + ) + mock_args.update(sample_request) + client.delete_access_config(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig" % client.transport._host, args[1]) + + +def test_delete_access_config_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_access_config( + compute.DeleteAccessConfigInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + access_config='access_config_value', + network_interface='network_interface_value', + ) + + +def test_detach_disk_rest(transport: str = 'rest', request_type=compute.DetachDiskInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.detach_disk(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_detach_disk_rest_bad_request(transport: str = 'rest', request_type=compute.DetachDiskInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.detach_disk(request) + + +def test_detach_disk_rest_from_dict(): + test_detach_disk_rest(request_type=dict) + + +def test_detach_disk_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + device_name='device_name_value', + ) + mock_args.update(sample_request) + client.detach_disk(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/detachDisk" % client.transport._host, args[1]) + + +def test_detach_disk_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.detach_disk( + compute.DetachDiskInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + device_name='device_name_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Instance( + can_ip_forward=True, + cpu_platform='cpu_platform_value', + creation_timestamp='creation_timestamp_value', + deletion_protection=True, + description='description_value', + fingerprint='fingerprint_value', + hostname='hostname_value', + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + last_start_timestamp='last_start_timestamp_value', + last_stop_timestamp='last_stop_timestamp_value', + last_suspended_timestamp='last_suspended_timestamp_value', + machine_type='machine_type_value', + min_cpu_platform='min_cpu_platform_value', + name='name_value', + private_ipv6_google_access='private_ipv6_google_access_value', + resource_policies=['resource_policies_value'], + satisfies_pzs=True, + self_link='self_link_value', + start_restricted=True, + status='status_value', + status_message='status_message_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Instance.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Instance) + assert response.can_ip_forward is True + assert response.cpu_platform == 'cpu_platform_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.deletion_protection is True + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.hostname == 'hostname_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.last_start_timestamp == 'last_start_timestamp_value' + assert response.last_stop_timestamp == 'last_stop_timestamp_value' + assert response.last_suspended_timestamp == 'last_suspended_timestamp_value' + assert response.machine_type == 'machine_type_value' + assert response.min_cpu_platform == 'min_cpu_platform_value' + assert response.name == 'name_value' + assert response.private_ipv6_google_access == 'private_ipv6_google_access_value' + assert response.resource_policies == ['resource_policies_value'] + assert response.satisfies_pzs is True + assert response.self_link == 'self_link_value' + assert response.start_restricted is True + assert response.status == 'status_value' + assert response.status_message == 'status_message_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Instance() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Instance.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_get_effective_firewalls_rest(transport: str = 'rest', request_type=compute.GetEffectiveFirewallsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstancesGetEffectiveFirewallsResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstancesGetEffectiveFirewallsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_effective_firewalls(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.InstancesGetEffectiveFirewallsResponse) + + +def test_get_effective_firewalls_rest_bad_request(transport: str = 'rest', request_type=compute.GetEffectiveFirewallsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_effective_firewalls(request) + + +def test_get_effective_firewalls_rest_from_dict(): + test_get_effective_firewalls_rest(request_type=dict) + + +def test_get_effective_firewalls_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstancesGetEffectiveFirewallsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstancesGetEffectiveFirewallsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + ) + mock_args.update(sample_request) + client.get_effective_firewalls(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/getEffectiveFirewalls" % client.transport._host, args[1]) + + +def test_get_effective_firewalls_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_effective_firewalls( + compute.GetEffectiveFirewallsInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + ) + + +def test_get_guest_attributes_rest(transport: str = 'rest', request_type=compute.GetGuestAttributesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.GuestAttributes( + kind='kind_value', + query_path='query_path_value', + self_link='self_link_value', + variable_key='variable_key_value', + variable_value='variable_value_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.GuestAttributes.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_guest_attributes(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.GuestAttributes) + assert response.kind == 'kind_value' + assert response.query_path == 'query_path_value' + assert response.self_link == 'self_link_value' + assert response.variable_key == 'variable_key_value' + assert response.variable_value == 'variable_value_value' + + +def test_get_guest_attributes_rest_bad_request(transport: str = 'rest', request_type=compute.GetGuestAttributesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_guest_attributes(request) + + +def test_get_guest_attributes_rest_from_dict(): + test_get_guest_attributes_rest(request_type=dict) + + +def test_get_guest_attributes_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.GuestAttributes() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.GuestAttributes.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.get_guest_attributes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes" % client.transport._host, args[1]) + + +def test_get_guest_attributes_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_guest_attributes( + compute.GetGuestAttributesInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyInstanceRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + ) + + +def test_get_screenshot_rest(transport: str = 'rest', request_type=compute.GetScreenshotInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Screenshot( + contents='contents_value', + kind='kind_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Screenshot.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_screenshot(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Screenshot) + assert response.contents == 'contents_value' + assert response.kind == 'kind_value' + + +def test_get_screenshot_rest_bad_request(transport: str = 'rest', request_type=compute.GetScreenshotInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_screenshot(request) + + +def test_get_screenshot_rest_from_dict(): + test_get_screenshot_rest(request_type=dict) + + +def test_get_screenshot_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Screenshot() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Screenshot.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.get_screenshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/screenshot" % client.transport._host, args[1]) + + +def test_get_screenshot_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_screenshot( + compute.GetScreenshotInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_get_serial_port_output_rest(transport: str = 'rest', request_type=compute.GetSerialPortOutputInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SerialPortOutput( + contents='contents_value', + kind='kind_value', + next_=542, + self_link='self_link_value', + start=558, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SerialPortOutput.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_serial_port_output(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.SerialPortOutput) + assert response.contents == 'contents_value' + assert response.kind == 'kind_value' + assert response.next_ == 542 + assert response.self_link == 'self_link_value' + assert response.start == 558 + + +def test_get_serial_port_output_rest_bad_request(transport: str = 'rest', request_type=compute.GetSerialPortOutputInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_serial_port_output(request) + + +def test_get_serial_port_output_rest_from_dict(): + test_get_serial_port_output_rest(request_type=dict) + + +def test_get_serial_port_output_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SerialPortOutput() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SerialPortOutput.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.get_serial_port_output(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/serialPort" % client.transport._host, args[1]) + + +def test_get_serial_port_output_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_serial_port_output( + compute.GetSerialPortOutputInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_get_shielded_instance_identity_rest(transport: str = 'rest', request_type=compute.GetShieldedInstanceIdentityInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ShieldedInstanceIdentity( + kind='kind_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ShieldedInstanceIdentity.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_shielded_instance_identity(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.ShieldedInstanceIdentity) + assert response.kind == 'kind_value' + + +def test_get_shielded_instance_identity_rest_bad_request(transport: str = 'rest', request_type=compute.GetShieldedInstanceIdentityInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_shielded_instance_identity(request) + + +def test_get_shielded_instance_identity_rest_from_dict(): + test_get_shielded_instance_identity_rest(request_type=dict) + + +def test_get_shielded_instance_identity_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ShieldedInstanceIdentity() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ShieldedInstanceIdentity.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.get_shielded_instance_identity(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/getShieldedInstanceIdentity" % client.transport._host, args[1]) + + +def test_get_shielded_instance_identity_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_shielded_instance_identity( + compute.GetShieldedInstanceIdentityInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["instance_resource"] = compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type 
that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["instance_resource"] = compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance_resource=compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertInstanceRequest(), + project='project_value', + zone='zone_value', + instance_resource=compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListInstancesRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListInstancesRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListInstancesRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceList( + items=[ + compute.Instance(), + compute.Instance(), + compute.Instance(), + ], + next_page_token='abc', + ), + compute.InstanceList( + items=[], + next_page_token='def', + ), + compute.InstanceList( + items=[ + compute.Instance(), + ], + next_page_token='ghi', + ), + compute.InstanceList( + items=[ + compute.Instance(), + compute.Instance(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Instance) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_referrers_rest(transport: str = 'rest', request_type=compute.ListReferrersInstancesRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceListReferrers( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceListReferrers.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_referrers(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListReferrersPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_referrers_rest_bad_request(transport: str = 'rest', request_type=compute.ListReferrersInstancesRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_referrers(request) + + +def test_list_referrers_rest_from_dict(): + test_list_referrers_rest(request_type=dict) + + +def test_list_referrers_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceListReferrers() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceListReferrers.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.list_referrers(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/referrers" % client.transport._host, args[1]) + + +def test_list_referrers_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_referrers( + compute.ListReferrersInstancesRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_list_referrers_rest_pager(): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InstanceListReferrers( + items=[ + compute.Reference(), + compute.Reference(), + compute.Reference(), + ], + next_page_token='abc', + ), + compute.InstanceListReferrers( + items=[], + next_page_token='def', + ), + compute.InstanceListReferrers( + items=[ + compute.Reference(), + ], + next_page_token='ghi', + ), + compute.InstanceListReferrers( + items=[ + compute.Reference(), + compute.Reference(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InstanceListReferrers.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + pager = client.list_referrers(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Reference) + for i in results) + + pages = list(client.list_referrers(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_remove_resource_policies_rest(transport: str = 'rest', request_type=compute.RemoveResourcePoliciesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_remove_resource_policies_request_resource"] = compute.InstancesRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + 
# Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_resource_policies(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_resource_policies_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveResourcePoliciesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_remove_resource_policies_request_resource"] = compute.InstancesRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_resource_policies(request) + + +def test_remove_resource_policies_rest_from_dict(): + test_remove_resource_policies_rest(request_type=dict) + + +def test_remove_resource_policies_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_remove_resource_policies_request_resource=compute.InstancesRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + mock_args.update(sample_request) + client.remove_resource_policies(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/removeResourcePolicies" % client.transport._host, args[1]) + + +def test_remove_resource_policies_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_resource_policies( + compute.RemoveResourcePoliciesInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_remove_resource_policies_request_resource=compute.InstancesRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + + +def test_reset_rest(transport: str = 'rest', request_type=compute.ResetInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.reset(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_reset_rest_bad_request(transport: str = 'rest', request_type=compute.ResetInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.reset(request) + + +def test_reset_rest_from_dict(): + test_reset_rest(request_type=dict) + + +def test_reset_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.reset(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/reset" % client.transport._host, args[1]) + + +def test_reset_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.reset( + compute.ResetInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_send_diagnostic_interrupt_rest(transport: str = 'rest', request_type=compute.SendDiagnosticInterruptInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SendDiagnosticInterruptInstanceResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SendDiagnosticInterruptInstanceResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.send_diagnostic_interrupt(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.SendDiagnosticInterruptInstanceResponse) + + +def test_send_diagnostic_interrupt_rest_bad_request(transport: str = 'rest', request_type=compute.SendDiagnosticInterruptInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.send_diagnostic_interrupt(request) + + +def test_send_diagnostic_interrupt_rest_from_dict(): + test_send_diagnostic_interrupt_rest(request_type=dict) + + +def test_send_diagnostic_interrupt_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SendDiagnosticInterruptInstanceResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SendDiagnosticInterruptInstanceResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.send_diagnostic_interrupt(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/sendDiagnosticInterrupt" % client.transport._host, args[1]) + + +def test_send_diagnostic_interrupt_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.send_diagnostic_interrupt( + compute.SendDiagnosticInterruptInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_set_deletion_protection_rest(transport: str = 'rest', request_type=compute.SetDeletionProtectionInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_deletion_protection(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_deletion_protection_rest_bad_request(transport: str = 'rest', request_type=compute.SetDeletionProtectionInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_deletion_protection(request) + + +def test_set_deletion_protection_rest_from_dict(): + test_set_deletion_protection_rest(request_type=dict) + + +def test_set_deletion_protection_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.set_deletion_protection(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/setDeletionProtection" % client.transport._host, args[1]) + + +def test_set_deletion_protection_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_deletion_protection( + compute.SetDeletionProtectionInstanceRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + ) + + +def test_set_disk_auto_delete_rest(transport: str = 'rest', request_type=compute.SetDiskAutoDeleteInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_disk_auto_delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_disk_auto_delete_rest_bad_request(transport: str = 'rest', request_type=compute.SetDiskAutoDeleteInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_disk_auto_delete(request) + + +def test_set_disk_auto_delete_rest_from_dict(): + test_set_disk_auto_delete_rest(request_type=dict) + + +def test_set_disk_auto_delete_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + auto_delete=True, + device_name='device_name_value', + ) + mock_args.update(sample_request) + client.set_disk_auto_delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete" % client.transport._host, args[1]) + + +def test_set_disk_auto_delete_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_disk_auto_delete( + compute.SetDiskAutoDeleteInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + auto_delete=True, + device_name='device_name_value', + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyInstanceRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_labels_request_resource"] = compute.InstancesSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_labels(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_labels_request_resource"] = compute.InstancesSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_labels_request_resource=compute.InstancesSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_labels_request_resource=compute.InstancesSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_set_machine_resources_rest(transport: str = 'rest', request_type=compute.SetMachineResourcesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_machine_resources_request_resource"] = compute.InstancesSetMachineResourcesRequest(guest_accelerators=[compute.AcceleratorConfig(accelerator_count=1805)]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_machine_resources(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_machine_resources_rest_bad_request(transport: str = 'rest', request_type=compute.SetMachineResourcesInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_machine_resources_request_resource"] = compute.InstancesSetMachineResourcesRequest(guest_accelerators=[compute.AcceleratorConfig(accelerator_count=1805)]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_machine_resources(request) + + +def test_set_machine_resources_rest_from_dict(): + test_set_machine_resources_rest(request_type=dict) + + +def test_set_machine_resources_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_machine_resources_request_resource=compute.InstancesSetMachineResourcesRequest(guest_accelerators=[compute.AcceleratorConfig(accelerator_count=1805)]), + ) + mock_args.update(sample_request) + client.set_machine_resources(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMachineResources" % client.transport._host, args[1]) + + +def test_set_machine_resources_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_machine_resources( + compute.SetMachineResourcesInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_machine_resources_request_resource=compute.InstancesSetMachineResourcesRequest(guest_accelerators=[compute.AcceleratorConfig(accelerator_count=1805)]), + ) + + +def test_set_machine_type_rest(transport: str = 'rest', request_type=compute.SetMachineTypeInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_machine_type_request_resource"] = compute.InstancesSetMachineTypeRequest(machine_type='machine_type_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_machine_type(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_machine_type_rest_bad_request(transport: str = 'rest', request_type=compute.SetMachineTypeInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_machine_type_request_resource"] = compute.InstancesSetMachineTypeRequest(machine_type='machine_type_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_machine_type(request) + + +def test_set_machine_type_rest_from_dict(): + test_set_machine_type_rest(request_type=dict) + + +def test_set_machine_type_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_machine_type_request_resource=compute.InstancesSetMachineTypeRequest(machine_type='machine_type_value'), + ) + mock_args.update(sample_request) + client.set_machine_type(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMachineType" % client.transport._host, args[1]) + + +def test_set_machine_type_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_machine_type( + compute.SetMachineTypeInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_machine_type_request_resource=compute.InstancesSetMachineTypeRequest(machine_type='machine_type_value'), + ) + + +def test_set_metadata_rest(transport: str = 'rest', request_type=compute.SetMetadataInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["metadata_resource"] = compute.Metadata(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_metadata(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_metadata_rest_bad_request(transport: str = 'rest', request_type=compute.SetMetadataInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["metadata_resource"] = compute.Metadata(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_metadata(request) + + +def test_set_metadata_rest_from_dict(): + test_set_metadata_rest(request_type=dict) + + +def test_set_metadata_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + metadata_resource=compute.Metadata(fingerprint='fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_metadata(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMetadata" % client.transport._host, args[1]) + + +def test_set_metadata_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_metadata( + compute.SetMetadataInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + metadata_resource=compute.Metadata(fingerprint='fingerprint_value'), + ) + + +def test_set_min_cpu_platform_rest(transport: str = 'rest', request_type=compute.SetMinCpuPlatformInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_min_cpu_platform_request_resource"] = compute.InstancesSetMinCpuPlatformRequest(min_cpu_platform='min_cpu_platform_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_min_cpu_platform(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_min_cpu_platform_rest_bad_request(transport: str = 'rest', request_type=compute.SetMinCpuPlatformInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_min_cpu_platform_request_resource"] = compute.InstancesSetMinCpuPlatformRequest(min_cpu_platform='min_cpu_platform_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_min_cpu_platform(request) + + +def test_set_min_cpu_platform_rest_from_dict(): + test_set_min_cpu_platform_rest(request_type=dict) + + +def test_set_min_cpu_platform_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_min_cpu_platform_request_resource=compute.InstancesSetMinCpuPlatformRequest(min_cpu_platform='min_cpu_platform_value'), + ) + mock_args.update(sample_request) + client.set_min_cpu_platform(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setMinCpuPlatform" % client.transport._host, args[1]) + + +def test_set_min_cpu_platform_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_min_cpu_platform( + compute.SetMinCpuPlatformInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_min_cpu_platform_request_resource=compute.InstancesSetMinCpuPlatformRequest(min_cpu_platform='min_cpu_platform_value'), + ) + + +def test_set_scheduling_rest(transport: str = 'rest', request_type=compute.SetSchedulingInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["scheduling_resource"] = compute.Scheduling(automatic_restart=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_scheduling(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_scheduling_rest_bad_request(transport: str = 'rest', request_type=compute.SetSchedulingInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["scheduling_resource"] = compute.Scheduling(automatic_restart=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_scheduling(request) + + +def test_set_scheduling_rest_from_dict(): + test_set_scheduling_rest(request_type=dict) + + +def test_set_scheduling_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + scheduling_resource=compute.Scheduling(automatic_restart=True), + ) + mock_args.update(sample_request) + client.set_scheduling(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setScheduling" % client.transport._host, args[1]) + + +def test_set_scheduling_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_scheduling( + compute.SetSchedulingInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + scheduling_resource=compute.Scheduling(automatic_restart=True), + ) + + +def test_set_service_account_rest(transport: str = 'rest', request_type=compute.SetServiceAccountInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_service_account_request_resource"] = compute.InstancesSetServiceAccountRequest(email='email_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_service_account(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_service_account_rest_bad_request(transport: str = 'rest', request_type=compute.SetServiceAccountInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_set_service_account_request_resource"] = compute.InstancesSetServiceAccountRequest(email='email_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_service_account(request) + + +def test_set_service_account_rest_from_dict(): + test_set_service_account_rest(request_type=dict) + + +def test_set_service_account_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_service_account_request_resource=compute.InstancesSetServiceAccountRequest(email='email_value'), + ) + mock_args.update(sample_request) + client.set_service_account(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setServiceAccount" % client.transport._host, args[1]) + + +def test_set_service_account_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_service_account( + compute.SetServiceAccountInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_set_service_account_request_resource=compute.InstancesSetServiceAccountRequest(email='email_value'), + ) + + +def test_set_shielded_instance_integrity_policy_rest(transport: str = 'rest', request_type=compute.SetShieldedInstanceIntegrityPolicyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["shielded_instance_integrity_policy_resource"] = compute.ShieldedInstanceIntegrityPolicy(update_auto_learn_policy=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_shielded_instance_integrity_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_shielded_instance_integrity_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetShieldedInstanceIntegrityPolicyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["shielded_instance_integrity_policy_resource"] = compute.ShieldedInstanceIntegrityPolicy(update_auto_learn_policy=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_shielded_instance_integrity_policy(request) + + +def test_set_shielded_instance_integrity_policy_rest_from_dict(): + test_set_shielded_instance_integrity_policy_rest(request_type=dict) + + +def test_set_shielded_instance_integrity_policy_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + shielded_instance_integrity_policy_resource=compute.ShieldedInstanceIntegrityPolicy(update_auto_learn_policy=True), + ) + mock_args.update(sample_request) + client.set_shielded_instance_integrity_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setShieldedInstanceIntegrityPolicy" % client.transport._host, args[1]) + + +def test_set_shielded_instance_integrity_policy_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_shielded_instance_integrity_policy( + compute.SetShieldedInstanceIntegrityPolicyInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + shielded_instance_integrity_policy_resource=compute.ShieldedInstanceIntegrityPolicy(update_auto_learn_policy=True), + ) + + +def test_set_tags_rest(transport: str = 'rest', request_type=compute.SetTagsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["tags_resource"] = compute.Tags(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_tags(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_tags_rest_bad_request(transport: str = 'rest', request_type=compute.SetTagsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["tags_resource"] = compute.Tags(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_tags(request) + + +def test_set_tags_rest_from_dict(): + test_set_tags_rest(request_type=dict) + + +def test_set_tags_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + tags_resource=compute.Tags(fingerprint='fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_tags(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/setTags" % client.transport._host, args[1]) + + +def test_set_tags_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_tags( + compute.SetTagsInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + tags_resource=compute.Tags(fingerprint='fingerprint_value'), + ) + + +def test_simulate_maintenance_event_rest(transport: str = 'rest', request_type=compute.SimulateMaintenanceEventInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.simulate_maintenance_event(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_simulate_maintenance_event_rest_bad_request(transport: str = 'rest', request_type=compute.SimulateMaintenanceEventInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.simulate_maintenance_event(request) + + +def test_simulate_maintenance_event_rest_from_dict(): + test_simulate_maintenance_event_rest(request_type=dict) + + +def test_simulate_maintenance_event_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.simulate_maintenance_event(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/simulateMaintenanceEvent" % client.transport._host, args[1]) + + +def test_simulate_maintenance_event_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.simulate_maintenance_event( + compute.SimulateMaintenanceEventInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_start_rest(transport: str = 'rest', request_type=compute.StartInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.start(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_start_rest_bad_request(transport: str = 'rest', request_type=compute.StartInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.start(request) + + +def test_start_rest_from_dict(): + test_start_rest(request_type=dict) + + +def test_start_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.start(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/start" % client.transport._host, args[1]) + + +def test_start_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.start( + compute.StartInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_start_with_encryption_key_rest(transport: str = 'rest', request_type=compute.StartWithEncryptionKeyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_start_with_encryption_key_request_resource"] = compute.InstancesStartWithEncryptionKeyRequest(disks=[compute.CustomerEncryptionKeyProtectedDisk(disk_encryption_key=compute.CustomerEncryptionKey(kms_key_name='kms_key_name_value'))]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.start_with_encryption_key(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_start_with_encryption_key_rest_bad_request(transport: str = 'rest', request_type=compute.StartWithEncryptionKeyInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instances_start_with_encryption_key_request_resource"] = compute.InstancesStartWithEncryptionKeyRequest(disks=[compute.CustomerEncryptionKeyProtectedDisk(disk_encryption_key=compute.CustomerEncryptionKey(kms_key_name='kms_key_name_value'))]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.start_with_encryption_key(request) + + +def test_start_with_encryption_key_rest_from_dict(): + test_start_with_encryption_key_rest(request_type=dict) + + +def test_start_with_encryption_key_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instances_start_with_encryption_key_request_resource=compute.InstancesStartWithEncryptionKeyRequest(disks=[compute.CustomerEncryptionKeyProtectedDisk(disk_encryption_key=compute.CustomerEncryptionKey(kms_key_name='kms_key_name_value'))]), + ) + mock_args.update(sample_request) + client.start_with_encryption_key(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey" % client.transport._host, args[1]) + + +def test_start_with_encryption_key_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.start_with_encryption_key( + compute.StartWithEncryptionKeyInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instances_start_with_encryption_key_request_resource=compute.InstancesStartWithEncryptionKeyRequest(disks=[compute.CustomerEncryptionKeyProtectedDisk(disk_encryption_key=compute.CustomerEncryptionKey(kms_key_name='kms_key_name_value'))]), + ) + + +def test_stop_rest(transport: str = 'rest', request_type=compute.StopInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.stop(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_stop_rest_bad_request(transport: str = 'rest', request_type=compute.StopInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.stop(request) + + +def test_stop_rest_from_dict(): + test_stop_rest(request_type=dict) + + +def test_stop_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + ) + mock_args.update(sample_request) + client.stop(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/stop" % client.transport._host, args[1]) + + +def test_stop_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.stop( + compute.StopInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsInstanceRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instance_resource"] = compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["instance_resource"] = compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + instance_resource=compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + instance_resource=compute.Instance(advanced_machine_features=compute.AdvancedMachineFeatures(enable_nested_virtualization=True)), + ) + + +def test_update_access_config_rest(transport: str = 'rest', request_type=compute.UpdateAccessConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["access_config_resource"] = compute.AccessConfig(external_ipv6='external_ipv6_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_access_config(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_access_config_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateAccessConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["access_config_resource"] = compute.AccessConfig(external_ipv6='external_ipv6_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_access_config(request) + + +def test_update_access_config_rest_from_dict(): + test_update_access_config_rest(request_type=dict) + + +def test_update_access_config_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + access_config_resource=compute.AccessConfig(external_ipv6='external_ipv6_value'), + ) + mock_args.update(sample_request) + client.update_access_config(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateAccessConfig" % client.transport._host, args[1]) + + +def test_update_access_config_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_access_config( + compute.UpdateAccessConfigInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + access_config_resource=compute.AccessConfig(external_ipv6='external_ipv6_value'), + ) + + +def test_update_display_device_rest(transport: str = 'rest', request_type=compute.UpdateDisplayDeviceInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["display_device_resource"] = compute.DisplayDevice(enable_display=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_display_device(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_display_device_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateDisplayDeviceInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["display_device_resource"] = compute.DisplayDevice(enable_display=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_display_device(request) + + +def test_update_display_device_rest_from_dict(): + test_update_display_device_rest(request_type=dict) + + +def test_update_display_device_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + display_device_resource=compute.DisplayDevice(enable_display=True), + ) + mock_args.update(sample_request) + client.update_display_device(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateDisplayDevice" % client.transport._host, args[1]) + + +def test_update_display_device_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_display_device( + compute.UpdateDisplayDeviceInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + display_device_resource=compute.DisplayDevice(enable_display=True), + ) + + +def test_update_network_interface_rest(transport: str = 'rest', request_type=compute.UpdateNetworkInterfaceInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["network_interface_resource"] = compute.NetworkInterface(access_configs=[compute.AccessConfig(external_ipv6='external_ipv6_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_network_interface(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_network_interface_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateNetworkInterfaceInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["network_interface_resource"] = compute.NetworkInterface(access_configs=[compute.AccessConfig(external_ipv6='external_ipv6_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_network_interface(request) + + +def test_update_network_interface_rest_from_dict(): + test_update_network_interface_rest(request_type=dict) + + +def test_update_network_interface_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + network_interface_resource=compute.NetworkInterface(access_configs=[compute.AccessConfig(external_ipv6='external_ipv6_value')]), + ) + mock_args.update(sample_request) + client.update_network_interface(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateNetworkInterface" % client.transport._host, args[1]) + + +def test_update_network_interface_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_network_interface( + compute.UpdateNetworkInterfaceInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + network_interface='network_interface_value', + network_interface_resource=compute.NetworkInterface(access_configs=[compute.AccessConfig(external_ipv6='external_ipv6_value')]), + ) + + +def test_update_shielded_instance_config_rest(transport: str = 'rest', request_type=compute.UpdateShieldedInstanceConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["shielded_instance_config_resource"] = compute.ShieldedInstanceConfig(enable_integrity_monitoring=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_shielded_instance_config(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_shielded_instance_config_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateShieldedInstanceConfigInstanceRequest): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + request_init["shielded_instance_config_resource"] = compute.ShieldedInstanceConfig(enable_integrity_monitoring=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_shielded_instance_config(request) + + +def test_update_shielded_instance_config_rest_from_dict(): + test_update_shielded_instance_config_rest(request_type=dict) + + +def test_update_shielded_instance_config_rest_flattened(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + instance='instance_value', + shielded_instance_config_resource=compute.ShieldedInstanceConfig(enable_integrity_monitoring=True), + ) + mock_args.update(sample_request) + client.update_shielded_instance_config(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/updateShieldedInstanceConfig" % client.transport._host, args[1]) + + +def test_update_shielded_instance_config_rest_flattened_error(transport: str = 'rest'): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_shielded_instance_config( + compute.UpdateShieldedInstanceConfigInstanceRequest(), + project='project_value', + zone='zone_value', + instance='instance_value', + shielded_instance_config_resource=compute.ShieldedInstanceConfig(enable_integrity_monitoring=True), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.InstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.InstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstancesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.InstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InstancesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.InstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = InstancesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.InstancesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_instances_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.InstancesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_instances_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.instances.transports.InstancesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.InstancesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'add_access_config', + 'add_resource_policies', + 'aggregated_list', + 'attach_disk', + 'bulk_insert', + 'delete', + 'delete_access_config', + 'detach_disk', + 'get', + 'get_effective_firewalls', + 'get_guest_attributes', + 'get_iam_policy', + 'get_screenshot', + 'get_serial_port_output', + 'get_shielded_instance_identity', + 'insert', + 'list', + 'list_referrers', + 'remove_resource_policies', + 'reset', + 'send_diagnostic_interrupt', + 'set_deletion_protection', + 'set_disk_auto_delete', + 'set_iam_policy', + 'set_labels', + 'set_machine_resources', + 'set_machine_type', + 'set_metadata', + 'set_min_cpu_platform', + 'set_scheduling', + 'set_service_account', + 'set_shielded_instance_integrity_policy', + 'set_tags', + 'simulate_maintenance_event', + 'start', + 'start_with_encryption_key', + 'stop', + 'test_iam_permissions', + 'update', + 'update_access_config', + 'update_display_device', + 'update_network_interface', + 'update_shielded_instance_config', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_instances_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.instances.transports.InstancesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InstancesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def 
test_instances_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.instances.transports.InstancesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InstancesTransport() + adc.assert_called_once() + + +def test_instances_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + InstancesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_instances_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.InstancesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_instances_host_no_port(): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_instances_host_with_port(): + client = InstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def 
test_common_billing_account_path():
    # Path helper should expand the billingAccounts/{billing_account} template.
    # NOTE(review): the leading `def ` of this function sits at the end of the previous chunk.
    billing_account = "squid"
    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
    actual = InstancesClient.common_billing_account_path(billing_account)
    assert expected == actual


def test_parse_common_billing_account_path():
    expected = {
        "billing_account": "clam",
    }
    path = InstancesClient.common_billing_account_path(**expected)

    # Check that the path construction is reversible.
    actual = InstancesClient.parse_common_billing_account_path(path)
    assert expected == actual

def test_common_folder_path():
    # Path helper should expand the folders/{folder} template.
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder, )
    actual = InstancesClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    expected = {
        "folder": "octopus",
    }
    path = InstancesClient.common_folder_path(**expected)

    # Check that the path construction is reversible.
    actual = InstancesClient.parse_common_folder_path(path)
    assert expected == actual

def test_common_organization_path():
    # Path helper should expand the organizations/{organization} template.
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization, )
    actual = InstancesClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    expected = {
        "organization": "nudibranch",
    }
    path = InstancesClient.common_organization_path(**expected)

    # Check that the path construction is reversible.
    actual = InstancesClient.parse_common_organization_path(path)
    assert expected == actual

def test_common_project_path():
    # Path helper should expand the projects/{project} template.
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project, )
    actual = InstancesClient.common_project_path(project)
    assert expected == actual


def test_parse_common_project_path():
    expected = {
        "project": "mussel",
    }
    path = InstancesClient.common_project_path(**expected)

    # Check that the path construction is reversible.
    actual = InstancesClient.parse_common_project_path(path)
    assert expected == actual

def test_common_location_path():
    # Path helper should expand the projects/{project}/locations/{location} template.
    project = "winkle"
    location = "nautilus"
    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
    actual = InstancesClient.common_location_path(project, location)
    assert expected == actual


def test_parse_common_location_path():
    expected = {
        "project": "scallop",
        "location": "abalone",
    }
    path = InstancesClient.common_location_path(**expected)

    # Check that the path construction is reversible.
    actual = InstancesClient.parse_common_location_path(path)
    assert expected == actual


def test_client_withDEFAULT_CLIENT_INFO():
    # A caller-supplied client_info must be forwarded to _prep_wrapped_messages,
    # both when constructing the client and when constructing the transport directly.
    client_info = gapic_v1.client_info.ClientInfo()

    with mock.patch.object(transports.InstancesTransport, '_prep_wrapped_messages') as prep:
        client = InstancesClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)

    with mock.patch.object(transports.InstancesTransport, '_prep_wrapped_messages') as prep:
        transport_class = InstancesClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)


def test_transport_close():
    # Closing the client must close the named underlying transport channel/session.
    transports = {
        "rest": "_session",
    }

    for transport, close_name in transports.items():
        client = InstancesClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()

def test_client_ctx():
    # The client must be usable as a context manager and close its transport on exit.
    transports = [
        'rest',
    ]
    for transport in transports:
        client = InstancesClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()

# ==== begin generated file: owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_interconnect_attachments.py ====
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock

import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule

from requests import Response
from requests import Request
from requests.sessions import Session

from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.interconnect_attachments import InterconnectAttachmentsClient
from google.cloud.compute_v1.services.interconnect_attachments import pagers
from google.cloud.compute_v1.services.interconnect_attachments import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth


def client_cert_source_callback():
    # Stand-in client certificate callback used throughout the mTLS tests.
    return b"cert bytes", b"key bytes"


# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    # Swap a localhost default endpoint for a googleapis one so mTLS autoswitching is testable.
    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    # _get_default_mtls_endpoint should only rewrite *.googleapis.com hosts.
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert InterconnectAttachmentsClient._get_default_mtls_endpoint(None) is None
    assert InterconnectAttachmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert InterconnectAttachmentsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert InterconnectAttachmentsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert InterconnectAttachmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert InterconnectAttachmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi


@pytest.mark.parametrize("client_class", [
    InterconnectAttachmentsClient,
])
def test_interconnect_attachments_client_from_service_account_info(client_class):
    # from_service_account_info should build a client carrying the factory's credentials.
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'compute.googleapis.com:443'


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.InterconnectAttachmentsRestTransport, "rest"),
])
def test_interconnect_attachments_client_service_account_always_use_jwt(transport_class, transport_name):
    # with_always_use_jwt_access is called iff always_use_jwt_access=True.
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()


@pytest.mark.parametrize("client_class", [
    InterconnectAttachmentsClient,
])
def test_interconnect_attachments_client_from_service_account_file(client_class):
    # Both from_service_account_file and its _json alias should carry the factory's credentials.
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'compute.googleapis.com:443'


def test_interconnect_attachments_client_get_transport_class():
    # The default and explicitly-named transport classes should both be REST.
    transport = InterconnectAttachmentsClient.get_transport_class()
    available_transports = [
        transports.InterconnectAttachmentsRestTransport,
    ]
    assert transport in available_transports

    transport = InterconnectAttachmentsClient.get_transport_class("rest")
    assert transport == transports.InterconnectAttachmentsRestTransport


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (InterconnectAttachmentsClient, transports.InterconnectAttachmentsRestTransport, "rest"),
])
@mock.patch.object(InterconnectAttachmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InterconnectAttachmentsClient))
def test_interconnect_attachments_client_client_options(client_class, transport_class, transport_name):
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(InterconnectAttachmentsClient, 'get_transport_class') as gtc:
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials()
        )
        client = client_class(transport=transport)
        gtc.assert_not_called()

    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(InterconnectAttachmentsClient, 'get_transport_class') as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()

    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (InterconnectAttachmentsClient, transports.InterconnectAttachmentsRestTransport, "rest", "true"),
    (InterconnectAttachmentsClient, transports.InterconnectAttachmentsRestTransport, "rest", "false"),
])
@mock.patch.object(InterconnectAttachmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InterconnectAttachmentsClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_interconnect_attachments_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name, client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (InterconnectAttachmentsClient, transports.InterconnectAttachmentsRestTransport, "rest"),
])
def test_interconnect_attachments_client_client_options_scopes(client_class, transport_class, transport_name):
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        # Caller-supplied scopes must be forwarded verbatim to the transport.
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (InterconnectAttachmentsClient, transports.InterconnectAttachmentsRestTransport, "rest"),
])
def test_interconnect_attachments_client_client_options_credentials_file(client_class, transport_class, transport_name):
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListInterconnectAttachmentsRequest):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.InterconnectAttachmentAggregatedList(
            id='id_value',
            kind='kind_value',
            next_page_token='next_page_token_value',
            self_link='self_link_value',
            unreachables=['unreachables_value'],
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.InterconnectAttachmentAggregatedList.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.aggregated_list(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.AggregatedListPager)
    assert response.id == 'id_value'
    assert response.kind == 'kind_value'
    assert response.next_page_token == 'next_page_token_value'
    assert response.self_link == 'self_link_value'
    assert response.unreachables == ['unreachables_value']


def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListInterconnectAttachmentsRequest):
    # An HTTP 400 from the server must surface as core_exceptions.BadRequest.
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.aggregated_list(request)


def test_aggregated_list_rest_from_dict():
    # The method must also accept a plain dict request.
    test_aggregated_list_rest(request_type=dict)


def test_aggregated_list_rest_flattened(transport: str = 'rest'):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.InterconnectAttachmentAggregatedList()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.InterconnectAttachmentAggregatedList.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
        )
        mock_args.update(sample_request)
        client.aggregated_list(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/interconnectAttachments" % client.transport._host, args[1])


def test_aggregated_list_rest_flattened_error(transport: str = 'rest'):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.aggregated_list(
            compute.AggregatedListInterconnectAttachmentsRequest(),
            project='project_value',
        )


def test_aggregated_list_rest_pager():
    # Verify iteration, per-key lookup, and page tokens across multiple fake pages.
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        #with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.InterconnectAttachmentAggregatedList(
                items={
                    'a':compute.InterconnectAttachmentsScopedList(),
                    'b':compute.InterconnectAttachmentsScopedList(),
                    'c':compute.InterconnectAttachmentsScopedList(),
                },
                next_page_token='abc',
            ),
            compute.InterconnectAttachmentAggregatedList(
                items={},
                next_page_token='def',
            ),
            compute.InterconnectAttachmentAggregatedList(
                items={
                    'g':compute.InterconnectAttachmentsScopedList(),
                },
                next_page_token='ghi',
            ),
            compute.InterconnectAttachmentAggregatedList(
                items={
                    'h':compute.InterconnectAttachmentsScopedList(),
                    'i':compute.InterconnectAttachmentsScopedList(),
                },
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.InterconnectAttachmentAggregatedList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1"}

        pager = client.aggregated_list(request=sample_request)

        # 'a' is on the first page; 'h' only appears on the last page.
        assert isinstance(pager.get('a'), compute.InterconnectAttachmentsScopedList)
        assert pager.get('h') is None

        results = list(pager)
        assert len(results) == 6
        assert all(
            isinstance(i, tuple)
            for i in results)
        for result in results:
            assert isinstance(result, tuple)
            assert tuple(type(t) for t in result) == (str, compute.InterconnectAttachmentsScopedList)

        # After exhausting the pager the cursor sits on the last page.
        assert pager.get('a') is None
        assert isinstance(pager.get('h'), compute.InterconnectAttachmentsScopedList)

        pages = list(client.aggregated_list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteInterconnectAttachmentRequest):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.delete(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInterconnectAttachmentRequest):
    # An HTTP 400 from the server must surface as core_exceptions.BadRequest.
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.delete(request)


def test_delete_rest_from_dict():
    # The method must also accept a plain dict request.
    test_delete_rest(request_type=dict)


def test_delete_rest_flattened(transport: str = 'rest'):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            interconnect_attachment='interconnect_attachment_value',
        )
        mock_args.update(sample_request)
        client.delete(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{interconnect_attachment}" % client.transport._host, args[1])


def test_delete_rest_flattened_error(transport: str = 'rest'):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete(
            compute.DeleteInterconnectAttachmentRequest(),
            project='project_value',
            region='region_value',
            interconnect_attachment='interconnect_attachment_value',
        )


def test_get_rest(transport: str = 'rest', request_type=compute.GetInterconnectAttachmentRequest):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.InterconnectAttachment(
            admin_enabled=True,
            bandwidth='bandwidth_value',
            candidate_subnets=['candidate_subnets_value'],
            cloud_router_ip_address='cloud_router_ip_address_value',
            creation_timestamp='creation_timestamp_value',
            customer_router_ip_address='customer_router_ip_address_value',
            dataplane_version=1807,
            description='description_value',
            edge_availability_domain='edge_availability_domain_value',
            encryption='encryption_value',
            google_reference_id='google_reference_id_value',
            id=205,
            interconnect='interconnect_value',
            ipsec_internal_addresses=['ipsec_internal_addresses_value'],
            kind='kind_value',
            mtu=342,
            name='name_value',
            operational_status='operational_status_value',
            pairing_key='pairing_key_value',
            partner_asn=1181,
            region='region_value',
            router='router_value',
            satisfies_pzs=True,
            self_link='self_link_value',
            state='state_value',
            type_='type__value',
            vlan_tag8021q=1160,
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.InterconnectAttachment.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.InterconnectAttachment)
    assert response.admin_enabled is True
    assert response.bandwidth == 'bandwidth_value'
    assert response.candidate_subnets == ['candidate_subnets_value']
    assert response.cloud_router_ip_address == 'cloud_router_ip_address_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.customer_router_ip_address == 'customer_router_ip_address_value'
    assert response.dataplane_version == 1807
    assert response.description == 'description_value'
    assert response.edge_availability_domain == 'edge_availability_domain_value'
    assert response.encryption == 'encryption_value'
    assert response.google_reference_id == 'google_reference_id_value'
    assert response.id == 205
    assert response.interconnect == 'interconnect_value'
    assert response.ipsec_internal_addresses == ['ipsec_internal_addresses_value']
    assert response.kind == 'kind_value'
    assert response.mtu == 342
    assert response.name == 'name_value'
    assert response.operational_status == 'operational_status_value'
    assert response.pairing_key == 'pairing_key_value'
    assert response.partner_asn == 1181
    assert response.region == 'region_value'
    assert response.router == 'router_value'
    assert response.satisfies_pzs is True
    assert response.self_link == 'self_link_value'
    assert response.state == 'state_value'
    assert response.type_ == 'type__value'
    assert response.vlan_tag8021q == 1160


def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetInterconnectAttachmentRequest):
    # An HTTP 400 from the server must surface as core_exceptions.BadRequest.
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)


def test_get_rest_from_dict():
    # The method must also accept a plain dict request.
    test_get_rest(request_type=dict)


def test_get_rest_flattened(transport: str = 'rest'):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.InterconnectAttachment()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.InterconnectAttachment.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            interconnect_attachment='interconnect_attachment_value',
        )
        mock_args.update(sample_request)
        client.get(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{interconnect_attachment}" % client.transport._host, args[1])


def test_get_rest_flattened_error(transport: str = 'rest'):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get(
            compute.GetInterconnectAttachmentRequest(),
            project='project_value',
            region='region_value',
            interconnect_attachment='interconnect_attachment_value',
        )


def test_insert_rest(transport: str = 'rest', request_type=compute.InsertInterconnectAttachmentRequest):
    client = InterconnectAttachmentsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request_init["interconnect_attachment_resource"] = compute.InterconnectAttachment(admin_enabled=True)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertInterconnectAttachmentRequest): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["interconnect_attachment_resource"] = compute.InterconnectAttachment(admin_enabled=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + interconnect_attachment_resource=compute.InterconnectAttachment(admin_enabled=True), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/interconnectAttachments" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertInterconnectAttachmentRequest(), + project='project_value', + region='region_value', + interconnect_attachment_resource=compute.InterconnectAttachment(admin_enabled=True), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListInterconnectAttachmentsRequest): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InterconnectAttachmentList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectAttachmentList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListInterconnectAttachmentsRequest): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InterconnectAttachmentList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectAttachmentList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/interconnectAttachments" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListInterconnectAttachmentsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InterconnectAttachmentList( + items=[ + compute.InterconnectAttachment(), + compute.InterconnectAttachment(), + compute.InterconnectAttachment(), + ], + next_page_token='abc', + ), + compute.InterconnectAttachmentList( + items=[], + next_page_token='def', + ), + compute.InterconnectAttachmentList( + items=[ + compute.InterconnectAttachment(), + ], + next_page_token='ghi', + ), + compute.InterconnectAttachmentList( + items=[ + compute.InterconnectAttachment(), + compute.InterconnectAttachment(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InterconnectAttachmentList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InterconnectAttachment) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchInterconnectAttachmentRequest): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"} + request_init["interconnect_attachment_resource"] = compute.InterconnectAttachment(admin_enabled=True) + request = 
request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchInterconnectAttachmentRequest): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"} + request_init["interconnect_attachment_resource"] = compute.InterconnectAttachment(admin_enabled=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "interconnect_attachment": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + interconnect_attachment='interconnect_attachment_value', + interconnect_attachment_resource=compute.InterconnectAttachment(admin_enabled=True), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/interconnectAttachments/{interconnect_attachment}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchInterconnectAttachmentRequest(), + project='project_value', + region='region_value', + interconnect_attachment='interconnect_attachment_value', + interconnect_attachment_resource=compute.InterconnectAttachment(admin_enabled=True), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.InterconnectAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.InterconnectAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectAttachmentsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.InterconnectAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectAttachmentsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.InterconnectAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = InterconnectAttachmentsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.InterconnectAttachmentsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_interconnect_attachments_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.InterconnectAttachmentsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_interconnect_attachments_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.interconnect_attachments.transports.InterconnectAttachmentsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.InterconnectAttachmentsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_interconnect_attachments_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.interconnect_attachments.transports.InterconnectAttachmentsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InterconnectAttachmentsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_interconnect_attachments_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.interconnect_attachments.transports.InterconnectAttachmentsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InterconnectAttachmentsTransport() + adc.assert_called_once() + + +def test_interconnect_attachments_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + InterconnectAttachmentsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_interconnect_attachments_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.InterconnectAttachmentsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_interconnect_attachments_host_no_port(): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_interconnect_attachments_host_with_port(): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = InterconnectAttachmentsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = InterconnectAttachmentsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InterconnectAttachmentsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = InterconnectAttachmentsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = InterconnectAttachmentsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectAttachmentsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = InterconnectAttachmentsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = InterconnectAttachmentsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectAttachmentsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = InterconnectAttachmentsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = InterconnectAttachmentsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InterconnectAttachmentsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = InterconnectAttachmentsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = InterconnectAttachmentsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectAttachmentsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.InterconnectAttachmentsTransport, '_prep_wrapped_messages') as prep: + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.InterconnectAttachmentsTransport, '_prep_wrapped_messages') as prep: + transport_class = InterconnectAttachmentsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = InterconnectAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = InterconnectAttachmentsClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_interconnect_locations.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_interconnect_locations.py new file mode 100644 index 000000000..0f5c54840 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_interconnect_locations.py @@ -0,0 +1,947 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.interconnect_locations import InterconnectLocationsClient +from google.cloud.compute_v1.services.interconnect_locations import pagers +from google.cloud.compute_v1.services.interconnect_locations import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert InterconnectLocationsClient._get_default_mtls_endpoint(None) is None + assert InterconnectLocationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert InterconnectLocationsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert InterconnectLocationsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert InterconnectLocationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert InterconnectLocationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + InterconnectLocationsClient, +]) +def test_interconnect_locations_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.InterconnectLocationsRestTransport, "rest"), +]) +def test_interconnect_locations_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, 
None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + InterconnectLocationsClient, +]) +def test_interconnect_locations_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_interconnect_locations_client_get_transport_class(): + transport = InterconnectLocationsClient.get_transport_class() + available_transports = [ + transports.InterconnectLocationsRestTransport, + ] + assert transport in available_transports + + transport = InterconnectLocationsClient.get_transport_class("rest") + assert transport == transports.InterconnectLocationsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InterconnectLocationsClient, transports.InterconnectLocationsRestTransport, "rest"), +]) +@mock.patch.object(InterconnectLocationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InterconnectLocationsClient)) +def test_interconnect_locations_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(InterconnectLocationsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(InterconnectLocationsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (InterconnectLocationsClient, transports.InterconnectLocationsRestTransport, "rest", "true"), + (InterconnectLocationsClient, transports.InterconnectLocationsRestTransport, "rest", "false"), +]) +@mock.patch.object(InterconnectLocationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InterconnectLocationsClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_interconnect_locations_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InterconnectLocationsClient, transports.InterconnectLocationsRestTransport, "rest"), +]) +def test_interconnect_locations_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InterconnectLocationsClient, transports.InterconnectLocationsRestTransport, "rest"), +]) +def test_interconnect_locations_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetInterconnectLocationRequest): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect_location": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InterconnectLocation( + address='address_value', + availability_zone='availability_zone_value', + city='city_value', + continent='continent_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + facility_provider='facility_provider_value', + facility_provider_facility_id='facility_provider_facility_id_value', + id=205, + kind='kind_value', + name='name_value', + peeringdb_facility_id='peeringdb_facility_id_value', + self_link='self_link_value', + status='status_value', + supports_pzs=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectLocation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.InterconnectLocation) + assert response.address == 'address_value' + assert response.availability_zone == 'availability_zone_value' + assert response.city == 'city_value' + assert response.continent == 'continent_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.facility_provider == 'facility_provider_value' + assert response.facility_provider_facility_id == 'facility_provider_facility_id_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.peeringdb_facility_id == 'peeringdb_facility_id_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.supports_pzs is True + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetInterconnectLocationRequest): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect_location": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InterconnectLocation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectLocation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "interconnect_location": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + interconnect_location='interconnect_location_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnectLocations/{interconnect_location}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetInterconnectLocationRequest(), + project='project_value', + interconnect_location='interconnect_location_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListInterconnectLocationsRequest): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InterconnectLocationList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectLocationList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListInterconnectLocationsRequest): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InterconnectLocationList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectLocationList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnectLocations" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListInterconnectLocationsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InterconnectLocationList( + items=[ + compute.InterconnectLocation(), + compute.InterconnectLocation(), + compute.InterconnectLocation(), + ], + next_page_token='abc', + ), + compute.InterconnectLocationList( + items=[], + next_page_token='def', + ), + compute.InterconnectLocationList( + items=[ + compute.InterconnectLocation(), + ], + next_page_token='ghi', + ), + compute.InterconnectLocationList( + items=[ + compute.InterconnectLocation(), + compute.InterconnectLocation(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InterconnectLocationList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InterconnectLocation) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.InterconnectLocationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.InterconnectLocationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectLocationsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.InterconnectLocationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectLocationsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.InterconnectLocationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = InterconnectLocationsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.InterconnectLocationsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_interconnect_locations_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.InterconnectLocationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_interconnect_locations_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.interconnect_locations.transports.InterconnectLocationsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.InterconnectLocationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_interconnect_locations_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.interconnect_locations.transports.InterconnectLocationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InterconnectLocationsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_interconnect_locations_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.interconnect_locations.transports.InterconnectLocationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InterconnectLocationsTransport() + adc.assert_called_once() + + +def test_interconnect_locations_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + InterconnectLocationsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_interconnect_locations_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.InterconnectLocationsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_interconnect_locations_host_no_port(): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_interconnect_locations_host_with_port(): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 
'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = InterconnectLocationsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = InterconnectLocationsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectLocationsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = InterconnectLocationsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = InterconnectLocationsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectLocationsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = InterconnectLocationsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = InterconnectLocationsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InterconnectLocationsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = InterconnectLocationsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = InterconnectLocationsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectLocationsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = InterconnectLocationsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = InterconnectLocationsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InterconnectLocationsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.InterconnectLocationsTransport, '_prep_wrapped_messages') as prep: + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.InterconnectLocationsTransport, '_prep_wrapped_messages') as prep: + transport_class = InterconnectLocationsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = InterconnectLocationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_interconnects.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_interconnects.py new file mode 100644 index 000000000..7fdedf6f8 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_interconnects.py @@ -0,0 +1,1519 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import mock  # NOTE(review): third-party "mock" package; py3 stdlib has unittest.mock -- generated template convention
+
+import grpc  # appears unused by the visible REST tests; emitted by the GAPIC generator template
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response  # REST transport tests fake HTTP responses via requests objects
+from requests import Request
+from requests.sessions import Session  # Session.request is what the tests mock.patch
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template  # used to validate transcoded REST URLs
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.interconnects import InterconnectsClient
+from google.cloud.compute_v1.services.interconnects import pagers
+from google.cloud.compute_v1.services.interconnects import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+def client_cert_source_callback():  # stub mTLS client_cert_source: returns a fake (cert_pem, key_pem) pair
+    return b"cert bytes", b"key bytes"  # dummy bytes only; never parsed, just threaded through client options
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert InterconnectsClient._get_default_mtls_endpoint(None) is None + assert InterconnectsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert InterconnectsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert InterconnectsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert InterconnectsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert InterconnectsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + InterconnectsClient, +]) +def test_interconnects_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.InterconnectsRestTransport, "rest"), +]) +def test_interconnects_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) 
+ use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + InterconnectsClient, +]) +def test_interconnects_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_interconnects_client_get_transport_class(): + transport = InterconnectsClient.get_transport_class() + available_transports = [ + transports.InterconnectsRestTransport, + ] + assert transport in available_transports + + transport = InterconnectsClient.get_transport_class("rest") + assert transport == transports.InterconnectsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InterconnectsClient, transports.InterconnectsRestTransport, "rest"), +]) +@mock.patch.object(InterconnectsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InterconnectsClient)) +def test_interconnects_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(InterconnectsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(InterconnectsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (InterconnectsClient, transports.InterconnectsRestTransport, "rest", "true"), + (InterconnectsClient, transports.InterconnectsRestTransport, "rest", "false"), +]) +@mock.patch.object(InterconnectsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(InterconnectsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": 
"auto"}) +def test_interconnects_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InterconnectsClient, transports.InterconnectsRestTransport, "rest"), +]) +def test_interconnects_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (InterconnectsClient, transports.InterconnectsRestTransport, "rest"), +]) +def test_interconnects_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "interconnect": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + interconnect='interconnect_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnects/{interconnect}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteInterconnectRequest(), + project='project_value', + interconnect='interconnect_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Interconnect( + admin_enabled=True, + creation_timestamp='creation_timestamp_value', + customer_name='customer_name_value', + description='description_value', + google_ip_address='google_ip_address_value', + google_reference_id='google_reference_id_value', + id=205, + interconnect_attachments=['interconnect_attachments_value'], + interconnect_type='interconnect_type_value', + kind='kind_value', + link_type='link_type_value', + location='location_value', + name='name_value', + noc_contact_email='noc_contact_email_value', + operational_status='operational_status_value', + peer_ip_address='peer_ip_address_value', + provisioned_link_count=2375, + requested_link_count=2151, + satisfies_pzs=True, + self_link='self_link_value', + state='state_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Interconnect.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Interconnect) + assert response.admin_enabled is True + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.customer_name == 'customer_name_value' + assert response.description == 'description_value' + assert response.google_ip_address == 'google_ip_address_value' + assert response.google_reference_id == 'google_reference_id_value' + assert response.id == 205 + assert response.interconnect_attachments == ['interconnect_attachments_value'] + assert response.interconnect_type == 'interconnect_type_value' + assert response.kind == 'kind_value' + assert response.link_type == 'link_type_value' + assert response.location == 'location_value' + assert response.name == 'name_value' + assert response.noc_contact_email == 'noc_contact_email_value' + assert response.operational_status == 'operational_status_value' + assert response.peer_ip_address == 'peer_ip_address_value' + assert response.provisioned_link_count == 2375 + assert response.requested_link_count == 2151 + assert response.satisfies_pzs is True + assert response.self_link == 'self_link_value' + assert response.state == 'state_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Interconnect() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Interconnect.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "interconnect": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + interconnect='interconnect_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnects/{interconnect}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetInterconnectRequest(), + project='project_value', + interconnect='interconnect_value', + ) + + +def test_get_diagnostics_rest(transport: str = 'rest', request_type=compute.GetDiagnosticsInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InterconnectsGetDiagnosticsResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectsGetDiagnosticsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_diagnostics(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.InterconnectsGetDiagnosticsResponse) + + +def test_get_diagnostics_rest_bad_request(transport: str = 'rest', request_type=compute.GetDiagnosticsInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_diagnostics(request) + + +def test_get_diagnostics_rest_from_dict(): + test_get_diagnostics_rest(request_type=dict) + + +def test_get_diagnostics_rest_flattened(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InterconnectsGetDiagnosticsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectsGetDiagnosticsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "interconnect": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + interconnect='interconnect_value', + ) + mock_args.update(sample_request) + client.get_diagnostics(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnects/{interconnect}/getDiagnostics" % client.transport._host, args[1]) + + +def test_get_diagnostics_rest_flattened_error(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_diagnostics( + compute.GetDiagnosticsInterconnectRequest(), + project='project_value', + interconnect='interconnect_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["interconnect_resource"] = compute.Interconnect(admin_enabled=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["interconnect_resource"] = compute.Interconnect(admin_enabled=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + interconnect_resource=compute.Interconnect(admin_enabled=True), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnects" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertInterconnectRequest(), + project='project_value', + interconnect_resource=compute.Interconnect(admin_enabled=True), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListInterconnectsRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InterconnectList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListInterconnectsRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InterconnectList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InterconnectList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnects" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListInterconnectsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.InterconnectList( + items=[ + compute.Interconnect(), + compute.Interconnect(), + compute.Interconnect(), + ], + next_page_token='abc', + ), + compute.InterconnectList( + items=[], + next_page_token='def', + ), + compute.InterconnectList( + items=[ + compute.Interconnect(), + ], + next_page_token='ghi', + ), + compute.InterconnectList( + items=[ + compute.Interconnect(), + compute.Interconnect(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.InterconnectList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Interconnect) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + 
) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request_init["interconnect_resource"] = compute.Interconnect(admin_enabled=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchInterconnectRequest): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "interconnect": "sample2"} + request_init["interconnect_resource"] = compute.Interconnect(admin_enabled=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "interconnect": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + interconnect='interconnect_value', + interconnect_resource=compute.Interconnect(admin_enabled=True), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/interconnects/{interconnect}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchInterconnectRequest(), + project='project_value', + interconnect='interconnect_value', + interconnect_resource=compute.Interconnect(admin_enabled=True), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.InterconnectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.InterconnectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.InterconnectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = InterconnectsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.InterconnectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = InterconnectsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.InterconnectsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_interconnects_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.InterconnectsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_interconnects_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.interconnects.transports.InterconnectsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.InterconnectsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'get_diagnostics', + 'insert', + 'list', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_interconnects_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.interconnects.transports.InterconnectsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InterconnectsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_interconnects_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.interconnects.transports.InterconnectsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.InterconnectsTransport() + adc.assert_called_once() + + +def test_interconnects_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + InterconnectsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_interconnects_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.InterconnectsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_interconnects_host_no_port(): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_interconnects_host_with_port(): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = InterconnectsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = InterconnectsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InterconnectsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = InterconnectsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = InterconnectsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = InterconnectsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = InterconnectsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = InterconnectsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = InterconnectsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = InterconnectsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = InterconnectsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = InterconnectsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = InterconnectsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.InterconnectsTransport, '_prep_wrapped_messages') as prep: + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.InterconnectsTransport, '_prep_wrapped_messages') as prep: + transport_class = InterconnectsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = InterconnectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying 
transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_license_codes.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_license_codes.py new file mode 100644 index 000000000..e9f39e136 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_license_codes.py @@ -0,0 +1,871 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
import os
import mock

import grpc
from grpc.experimental import aio
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule

from requests import Response
from requests import Request
from requests.sessions import Session

from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.license_codes import LicenseCodesClient
from google.cloud.compute_v1.services.license_codes import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth


def client_cert_source_callback():
    """Return a dummy (cert, key) byte pair standing in for a client
    certificate source in the mTLS tests below."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes


# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert LicenseCodesClient._get_default_mtls_endpoint(None) is None + assert LicenseCodesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert LicenseCodesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert LicenseCodesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert LicenseCodesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert LicenseCodesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + LicenseCodesClient, +]) +def test_license_codes_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.LicenseCodesRestTransport, "rest"), +]) +def test_license_codes_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + LicenseCodesClient, +]) +def test_license_codes_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_license_codes_client_get_transport_class(): + transport = LicenseCodesClient.get_transport_class() + available_transports = [ + transports.LicenseCodesRestTransport, + ] + assert transport in available_transports + + transport = LicenseCodesClient.get_transport_class("rest") + assert transport == transports.LicenseCodesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (LicenseCodesClient, transports.LicenseCodesRestTransport, "rest"), +]) +@mock.patch.object(LicenseCodesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LicenseCodesClient)) +def test_license_codes_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(LicenseCodesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(LicenseCodesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (LicenseCodesClient, transports.LicenseCodesRestTransport, "rest", "true"), + (LicenseCodesClient, transports.LicenseCodesRestTransport, "rest", "false"), +]) +@mock.patch.object(LicenseCodesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LicenseCodesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) 
+def test_license_codes_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (LicenseCodesClient, transports.LicenseCodesRestTransport, "rest"), +]) +def test_license_codes_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (LicenseCodesClient, transports.LicenseCodesRestTransport, "rest"), +]) +def test_license_codes_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetLicenseCodeRequest): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_code": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.LicenseCode( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + self_link='self_link_value', + state='state_value', + transferable=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.LicenseCode.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.LicenseCode) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.state == 'state_value' + assert response.transferable is True + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetLicenseCodeRequest): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_code": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
    # A 400 response from the mocked session must surface to the caller as
    # core_exceptions.BadRequest.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)


def test_get_rest_from_dict():
    # Re-run the happy-path get test, passing the request as a plain dict
    # instead of a proto-plus message; both forms must be accepted.
    test_get_rest(request_type=dict)


def test_get_rest_flattened(transport: str = 'rest'):
    """Verify that flattened keyword args are transcoded into the REST URL."""
    client = LicenseCodesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.LicenseCode()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.LicenseCode.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "license_code": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            license_code='license_code_value',
        )
        mock_args.update(sample_request)
        client.get(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values (URL path must match the http rule template).
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenseCodes/{license_code}" % client.transport._host, args[1])


def test_get_rest_flattened_error(transport: str = 'rest'):
    client = LicenseCodesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
+ with pytest.raises(ValueError): + client.get( + compute.GetLicenseCodeRequest(), + project='project_value', + license_code='license_code_value', + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsLicenseCodeRequest): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsLicenseCodeRequest): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenseCodes/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsLicenseCodeRequest(), + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LicenseCodesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LicenseCodesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.LicenseCodesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = LicenseCodesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.LicenseCodesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_license_codes_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.LicenseCodesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_license_codes_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.license_codes.transports.LicenseCodesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.LicenseCodesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'get', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_license_codes_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.license_codes.transports.LicenseCodesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.LicenseCodesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_license_codes_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.license_codes.transports.LicenseCodesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.LicenseCodesTransport() + adc.assert_called_once() + + +def test_license_codes_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + LicenseCodesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_license_codes_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.LicenseCodesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_license_codes_host_no_port(): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_license_codes_host_with_port(): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + 
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = LicenseCodesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = LicenseCodesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = LicenseCodesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = LicenseCodesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = LicenseCodesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = LicenseCodesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = LicenseCodesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = LicenseCodesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = LicenseCodesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = LicenseCodesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = LicenseCodesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = LicenseCodesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = LicenseCodesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = LicenseCodesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = LicenseCodesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.LicenseCodesTransport, '_prep_wrapped_messages') as prep: + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.LicenseCodesTransport, '_prep_wrapped_messages') as prep: + transport_class = LicenseCodesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = LicenseCodesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_licenses.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_licenses.py new file mode 100644 index 000000000..8e515aa81 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_licenses.py @@ -0,0 +1,1573 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.licenses import LicensesClient +from google.cloud.compute_v1.services.licenses import pagers +from google.cloud.compute_v1.services.licenses import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
def modify_default_endpoint(client):
    # Return a non-default endpoint whenever the client's default endpoint is
    # localhost, so that the mtls-endpoint derivation tests below can observe
    # a real DEFAULT_ENDPOINT -> DEFAULT_MTLS_ENDPOINT switch.
    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT


def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint inserts '.mtls' only into *.googleapis.com
    hosts and leaves everything else (including None) untouched."""
    api_endpoint = "example.googleapis.com"
    api_mtls_endpoint = "example.mtls.googleapis.com"
    sandbox_endpoint = "example.sandbox.googleapis.com"
    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
    non_googleapi = "api.example.com"

    assert LicensesClient._get_default_mtls_endpoint(None) is None
    assert LicensesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
    assert LicensesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
    assert LicensesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
    assert LicensesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
    assert LicensesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi


@pytest.mark.parametrize("client_class", [
    LicensesClient,
])
def test_licenses_client_from_service_account_info(client_class):
    # from_service_account_info must delegate to the credentials factory and
    # wire the resulting credentials into the transport.
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'compute.googleapis.com:443'


@pytest.mark.parametrize("transport_class,transport_name", [
    (transports.LicensesRestTransport, "rest"),
])
def test_licenses_client_service_account_always_use_jwt(transport_class, transport_name):
    # with_always_use_jwt_access is applied iff always_use_jwt_access=True.
    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)

    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()


@pytest.mark.parametrize("client_class", [
    LicensesClient,
])
def test_licenses_client_from_service_account_file(client_class):
    # Both from_service_account_file and its json alias must delegate to the
    # same credentials factory.
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        client = client_class.from_service_account_json("dummy/file/path.json")
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)

        assert client.transport._host == 'compute.googleapis.com:443'


def test_licenses_client_get_transport_class():
    # The default transport and the explicitly-named "rest" transport must
    # both resolve to LicensesRestTransport.
    transport = LicensesClient.get_transport_class()
    available_transports = [
        transports.LicensesRestTransport,
    ]
    assert transport in available_transports

    transport = LicensesClient.get_transport_class("rest")
    assert transport == transports.LicensesRestTransport


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (LicensesClient, transports.LicensesRestTransport, "rest"),
])
@mock.patch.object(LicensesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LicensesClient))
def test_licenses_client_client_options(client_class, transport_class, transport_name):
    # Check that if a transport instance is provided we won't create a new one.
+ with mock.patch.object(LicensesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(LicensesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (LicensesClient, transports.LicensesRestTransport, "rest", "true"), + (LicensesClient, transports.LicensesRestTransport, "rest", "false"), +]) +@mock.patch.object(LicensesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(LicensesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_licenses_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (LicensesClient, transports.LicensesRestTransport, "rest"), +]) +def test_licenses_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (LicensesClient, transports.LicensesRestTransport, "rest"), +]) +def test_licenses_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "license_": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + license_='license__value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenses/{license_}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteLicenseRequest(), + project='project_value', + license_='license__value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.License( + charges_use_fee=True, + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + license_code=1245, + name='name_value', + self_link='self_link_value', + transferable=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.License.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.License) + assert response.charges_use_fee is True + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.license_code == 1245 + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.transferable is True + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "license_": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.License() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.License.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "license_": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + license_='license__value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenses/{license_}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetLicenseRequest(), + project='project_value', + license_='license__value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenses/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyLicenseRequest(), + project='project_value', + resource='resource_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["license_resource"] = compute.License(charges_use_fee=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["license_resource"] = compute.License(charges_use_fee=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + license_resource=compute.License(charges_use_fee=True), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenses" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertLicenseRequest(), + project='project_value', + license_resource=compute.License(charges_use_fee=True), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListLicensesRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.LicensesListResponse( + id='id_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.LicensesListResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListLicensesRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.LicensesListResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.LicensesListResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenses" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListLicensesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.LicensesListResponse( + items=[ + compute.License(), + compute.License(), + compute.License(), + ], + next_page_token='abc', + ), + compute.LicensesListResponse( + items=[], + next_page_token='def', + ), + compute.LicensesListResponse( + items=[ + compute.License(), + ], + next_page_token='ghi', + ), + compute.LicensesListResponse( + items=[ + compute.License(), + compute.License(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.LicensesListResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.License) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a 
request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenses/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyLicenseRequest(), + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsLicenseRequest): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/licenses/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsLicenseRequest(), + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.LicensesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.LicensesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LicensesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.LicensesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = LicensesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.LicensesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = LicensesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.LicensesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_licenses_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.LicensesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_licenses_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.licenses.transports.LicensesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.LicensesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_licenses_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.licenses.transports.LicensesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.LicensesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_licenses_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.licenses.transports.LicensesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.LicensesTransport() + adc.assert_called_once() + + +def test_licenses_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + LicensesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_licenses_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.LicensesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_licenses_host_no_port(): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_licenses_host_with_port(): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = 
LicensesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = LicensesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = LicensesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = LicensesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = LicensesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = LicensesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = LicensesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = LicensesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = LicensesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = LicensesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = LicensesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = LicensesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = LicensesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = LicensesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = LicensesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.LicensesTransport, '_prep_wrapped_messages') as prep: + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.LicensesTransport, '_prep_wrapped_messages') as prep: + transport_class = LicensesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = LicensesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_machine_types.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_machine_types.py new file mode 100644 index 000000000..448c99266 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_machine_types.py @@ -0,0 +1,1132 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.machine_types import MachineTypesClient +from google.cloud.compute_v1.services.machine_types import pagers +from google.cloud.compute_v1.services.machine_types import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert MachineTypesClient._get_default_mtls_endpoint(None) is None + assert MachineTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert MachineTypesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert MachineTypesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert MachineTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert MachineTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + MachineTypesClient, +]) +def test_machine_types_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.MachineTypesRestTransport, "rest"), +]) +def test_machine_types_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + MachineTypesClient, +]) +def test_machine_types_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_machine_types_client_get_transport_class(): + transport = MachineTypesClient.get_transport_class() + available_transports = [ + transports.MachineTypesRestTransport, + ] + assert transport in available_transports + + transport = MachineTypesClient.get_transport_class("rest") + assert transport == transports.MachineTypesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MachineTypesClient, transports.MachineTypesRestTransport, "rest"), +]) +@mock.patch.object(MachineTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MachineTypesClient)) +def test_machine_types_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(MachineTypesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(MachineTypesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (MachineTypesClient, transports.MachineTypesRestTransport, "rest", "true"), + (MachineTypesClient, transports.MachineTypesRestTransport, "rest", "false"), +]) +@mock.patch.object(MachineTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(MachineTypesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) 
+def test_machine_types_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MachineTypesClient, transports.MachineTypesRestTransport, "rest"), +]) +def test_machine_types_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (MachineTypesClient, transports.MachineTypesRestTransport, "rest"), +]) +def test_machine_types_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListMachineTypesRequest): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.MachineTypeAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.MachineTypeAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListMachineTypesRequest): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.MachineTypeAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.MachineTypeAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/machineTypes" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListMachineTypesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.MachineTypeAggregatedList( + items={ + 'a':compute.MachineTypesScopedList(), + 'b':compute.MachineTypesScopedList(), + 'c':compute.MachineTypesScopedList(), + }, + next_page_token='abc', + ), + compute.MachineTypeAggregatedList( + items={}, + next_page_token='def', + ), + compute.MachineTypeAggregatedList( + items={ + 'g':compute.MachineTypesScopedList(), + }, + next_page_token='ghi', + ), + compute.MachineTypeAggregatedList( + items={ + 'h':compute.MachineTypesScopedList(), + 'i':compute.MachineTypesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.MachineTypeAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.MachineTypesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.MachineTypesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.MachineTypesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetMachineTypeRequest): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "machine_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.MachineType( + creation_timestamp='creation_timestamp_value', + description='description_value', + guest_cpus=1090, + id=205, + image_space_gb=1430, + is_shared_cpu=True, + kind='kind_value', + maximum_persistent_disks=2603, + maximum_persistent_disks_size_gb=3437, + memory_mb=967, + name='name_value', + self_link='self_link_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.MachineType.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.MachineType) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.guest_cpus == 1090 + assert response.id == 205 + assert response.image_space_gb == 1430 + assert response.is_shared_cpu is True + assert response.kind == 'kind_value' + assert response.maximum_persistent_disks == 2603 + assert response.maximum_persistent_disks_size_gb == 3437 + assert response.memory_mb == 967 + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetMachineTypeRequest): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "machine_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.MachineType() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.MachineType.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "machine_type": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + machine_type='machine_type_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/machineTypes/{machine_type}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetMachineTypeRequest(), + project='project_value', + zone='zone_value', + machine_type='machine_type_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListMachineTypesRequest): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.MachineTypeList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.MachineTypeList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListMachineTypesRequest): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.MachineTypeList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.MachineTypeList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/machineTypes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListMachineTypesRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.MachineTypeList( + items=[ + compute.MachineType(), + compute.MachineType(), + compute.MachineType(), + ], + next_page_token='abc', + ), + compute.MachineTypeList( + items=[], + next_page_token='def', + ), + compute.MachineTypeList( + items=[ + compute.MachineType(), + ], + next_page_token='ghi', + ), + compute.MachineTypeList( + items=[ + compute.MachineType(), + compute.MachineType(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.MachineTypeList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.MachineType) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.MachineTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.MachineTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MachineTypesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.MachineTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = MachineTypesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.MachineTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = MachineTypesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.MachineTypesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_machine_types_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.MachineTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_machine_types_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.machine_types.transports.MachineTypesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.MachineTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_machine_types_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.machine_types.transports.MachineTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MachineTypesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_machine_types_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.machine_types.transports.MachineTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.MachineTypesTransport() + adc.assert_called_once() + + +def test_machine_types_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + MachineTypesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_machine_types_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.MachineTypesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_machine_types_host_no_port(): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_machine_types_host_with_port(): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + 
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = MachineTypesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = MachineTypesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = MachineTypesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = MachineTypesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = MachineTypesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = MachineTypesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = MachineTypesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = MachineTypesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = MachineTypesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = MachineTypesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = MachineTypesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = MachineTypesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = MachineTypesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = MachineTypesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = MachineTypesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.MachineTypesTransport, '_prep_wrapped_messages') as prep: + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.MachineTypesTransport, '_prep_wrapped_messages') as prep: + transport_class = MachineTypesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = MachineTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_network_endpoint_groups.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_network_endpoint_groups.py new file mode 100644 index 000000000..782ca139d --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_network_endpoint_groups.py @@ -0,0 +1,2042 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.network_endpoint_groups import NetworkEndpointGroupsClient +from google.cloud.compute_v1.services.network_endpoint_groups import pagers +from google.cloud.compute_v1.services.network_endpoint_groups import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NetworkEndpointGroupsClient._get_default_mtls_endpoint(None) is None + assert NetworkEndpointGroupsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert NetworkEndpointGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert NetworkEndpointGroupsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert NetworkEndpointGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert NetworkEndpointGroupsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + NetworkEndpointGroupsClient, +]) +def test_network_endpoint_groups_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.NetworkEndpointGroupsRestTransport, "rest"), +]) +def test_network_endpoint_groups_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, 
None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + NetworkEndpointGroupsClient, +]) +def test_network_endpoint_groups_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_network_endpoint_groups_client_get_transport_class(): + transport = NetworkEndpointGroupsClient.get_transport_class() + available_transports = [ + transports.NetworkEndpointGroupsRestTransport, + ] + assert transport in available_transports + + transport = NetworkEndpointGroupsClient.get_transport_class("rest") + assert transport == transports.NetworkEndpointGroupsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NetworkEndpointGroupsClient, transports.NetworkEndpointGroupsRestTransport, "rest"), +]) +@mock.patch.object(NetworkEndpointGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NetworkEndpointGroupsClient)) +def test_network_endpoint_groups_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(NetworkEndpointGroupsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NetworkEndpointGroupsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (NetworkEndpointGroupsClient, transports.NetworkEndpointGroupsRestTransport, "rest", "true"), + (NetworkEndpointGroupsClient, transports.NetworkEndpointGroupsRestTransport, "rest", "false"), +]) +@mock.patch.object(NetworkEndpointGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NetworkEndpointGroupsClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_network_endpoint_groups_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NetworkEndpointGroupsClient, transports.NetworkEndpointGroupsRestTransport, "rest"), +]) +def test_network_endpoint_groups_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NetworkEndpointGroupsClient, transports.NetworkEndpointGroupsRestTransport, "rest"), +]) +def test_network_endpoint_groups_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListNetworkEndpointGroupsRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworkEndpointGroupAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListNetworkEndpointGroupsRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/networkEndpointGroups" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListNetworkEndpointGroupsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NetworkEndpointGroupAggregatedList( + items={ + 'a':compute.NetworkEndpointGroupsScopedList(), + 'b':compute.NetworkEndpointGroupsScopedList(), + 'c':compute.NetworkEndpointGroupsScopedList(), + }, + next_page_token='abc', + ), + compute.NetworkEndpointGroupAggregatedList( + items={}, + next_page_token='def', + ), + compute.NetworkEndpointGroupAggregatedList( + items={ + 'g':compute.NetworkEndpointGroupsScopedList(), + }, + next_page_token='ghi', + ), + compute.NetworkEndpointGroupAggregatedList( + items={ + 'h':compute.NetworkEndpointGroupsScopedList(), + 'i':compute.NetworkEndpointGroupsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NetworkEndpointGroupAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in 
response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.NetworkEndpointGroupsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.NetworkEndpointGroupsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.NetworkEndpointGroupsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_attach_network_endpoints_rest(transport: str = 'rest', request_type=compute.AttachNetworkEndpointsNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request_init["network_endpoint_groups_attach_endpoints_request_resource"] = compute.NetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.attach_network_endpoints(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_attach_network_endpoints_rest_bad_request(transport: str = 'rest', request_type=compute.AttachNetworkEndpointsNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request_init["network_endpoint_groups_attach_endpoints_request_resource"] = compute.NetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.attach_network_endpoints(request) + + +def test_attach_network_endpoints_rest_from_dict(): + test_attach_network_endpoints_rest(request_type=dict) + + +def test_attach_network_endpoints_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + network_endpoint_groups_attach_endpoints_request_resource=compute.NetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + mock_args.update(sample_request) + client.attach_network_endpoints(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}/attachNetworkEndpoints" % client.transport._host, args[1]) + + +def test_attach_network_endpoints_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.attach_network_endpoints( + compute.AttachNetworkEndpointsNetworkEndpointGroupRequest(), + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + network_endpoint_groups_attach_endpoints_request_resource=compute.NetworkEndpointGroupsAttachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteNetworkEndpointGroupRequest(), + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + ) + + +def test_detach_network_endpoints_rest(transport: str = 'rest', request_type=compute.DetachNetworkEndpointsNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request_init["network_endpoint_groups_detach_endpoints_request_resource"] = compute.NetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.detach_network_endpoints(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_detach_network_endpoints_rest_bad_request(transport: str = 'rest', request_type=compute.DetachNetworkEndpointsNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request_init["network_endpoint_groups_detach_endpoints_request_resource"] = compute.NetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.detach_network_endpoints(request) + + +def test_detach_network_endpoints_rest_from_dict(): + test_detach_network_endpoints_rest(request_type=dict) + + +def test_detach_network_endpoints_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + network_endpoint_groups_detach_endpoints_request_resource=compute.NetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + mock_args.update(sample_request) + client.detach_network_endpoints(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}/detachNetworkEndpoints" % client.transport._host, args[1]) + + +def test_detach_network_endpoints_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.detach_network_endpoints( + compute.DetachNetworkEndpointsNetworkEndpointGroupRequest(), + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + network_endpoint_groups_detach_endpoints_request_resource=compute.NetworkEndpointGroupsDetachEndpointsRequest(network_endpoints=[compute.NetworkEndpoint(annotations={'key_value': 'value_value'})]), + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworkEndpointGroup( + creation_timestamp='creation_timestamp_value', + default_port=1289, + description='description_value', + id=205, + kind='kind_value', + name='name_value', + network='network_value', + network_endpoint_type='network_endpoint_type_value', + region='region_value', + self_link='self_link_value', + size=443, + subnetwork='subnetwork_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroup.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.NetworkEndpointGroup) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.default_port == 1289 + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.network_endpoint_type == 'network_endpoint_type_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.size == 443 + assert response.subnetwork == 'subnetwork_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroup() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroup.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetNetworkEndpointGroupRequest(), + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["network_endpoint_group_resource"] = compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["network_endpoint_group_resource"] = compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + network_endpoint_group_resource=compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertNetworkEndpointGroupRequest(), + project='project_value', + zone='zone_value', + network_endpoint_group_resource=compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListNetworkEndpointGroupsRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListNetworkEndpointGroupsRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworkEndpointGroupList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListNetworkEndpointGroupsRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + ], + next_page_token='abc', + ), + compute.NetworkEndpointGroupList( + items=[], + next_page_token='def', + ), + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + ], + next_page_token='ghi', + ), + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NetworkEndpointGroupList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NetworkEndpointGroup) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_network_endpoints_rest(transport: str = 'rest', request_type=compute.ListNetworkEndpointsNetworkEndpointGroupsRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request_init["network_endpoint_groups_list_endpoints_request_resource"] = 
compute.NetworkEndpointGroupsListEndpointsRequest(health_status='health_status_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupsListNetworkEndpoints( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupsListNetworkEndpoints.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_network_endpoints(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNetworkEndpointsPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + + +def test_list_network_endpoints_rest_bad_request(transport: str = 'rest', request_type=compute.ListNetworkEndpointsNetworkEndpointGroupsRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + request_init["network_endpoint_groups_list_endpoints_request_resource"] = compute.NetworkEndpointGroupsListEndpointsRequest(health_status='health_status_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_network_endpoints(request) + + +def test_list_network_endpoints_rest_from_dict(): + test_list_network_endpoints_rest(request_type=dict) + + +def test_list_network_endpoints_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupsListNetworkEndpoints() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupsListNetworkEndpoints.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + network_endpoint_groups_list_endpoints_request_resource=compute.NetworkEndpointGroupsListEndpointsRequest(health_status='health_status_value'), + ) + mock_args.update(sample_request) + client.list_network_endpoints(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{network_endpoint_group}/listNetworkEndpoints" % client.transport._host, args[1]) + + +def test_list_network_endpoints_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_network_endpoints( + compute.ListNetworkEndpointsNetworkEndpointGroupsRequest(), + project='project_value', + zone='zone_value', + network_endpoint_group='network_endpoint_group_value', + network_endpoint_groups_list_endpoints_request_resource=compute.NetworkEndpointGroupsListEndpointsRequest(health_status='health_status_value'), + ) + + +def test_list_network_endpoints_rest_pager(): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[ + compute.NetworkEndpointWithHealthStatus(), + compute.NetworkEndpointWithHealthStatus(), + compute.NetworkEndpointWithHealthStatus(), + ], + next_page_token='abc', + ), + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[], + next_page_token='def', + ), + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[ + compute.NetworkEndpointWithHealthStatus(), + ], + next_page_token='ghi', + ), + compute.NetworkEndpointGroupsListNetworkEndpoints( + items=[ + compute.NetworkEndpointWithHealthStatus(), + compute.NetworkEndpointWithHealthStatus(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NetworkEndpointGroupsListNetworkEndpoints.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2", "network_endpoint_group": "sample3"} + sample_request["network_endpoint_groups_list_endpoints_request_resource"] = compute.NetworkEndpointGroupsListEndpointsRequest(health_status='health_status_value') + + pager = client.list_network_endpoints(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NetworkEndpointWithHealthStatus) + for i in results) + + pages = list(client.list_network_endpoints(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_test_iam_permissions_rest(transport: str = 'rest', 
request_type=compute.TestIamPermissionsNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsNetworkEndpointGroupRequest): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/networkEndpointGroups/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsNetworkEndpointGroupRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.NetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.NetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NetworkEndpointGroupsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.NetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NetworkEndpointGroupsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.NetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = NetworkEndpointGroupsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.NetworkEndpointGroupsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_network_endpoint_groups_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.NetworkEndpointGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_network_endpoint_groups_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.network_endpoint_groups.transports.NetworkEndpointGroupsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.NetworkEndpointGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'attach_network_endpoints', + 'delete', + 'detach_network_endpoints', + 'get', + 'insert', + 'list', + 'list_network_endpoints', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_network_endpoint_groups_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.network_endpoint_groups.transports.NetworkEndpointGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NetworkEndpointGroupsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_network_endpoint_groups_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.network_endpoint_groups.transports.NetworkEndpointGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NetworkEndpointGroupsTransport() + adc.assert_called_once() + + +def test_network_endpoint_groups_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NetworkEndpointGroupsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_network_endpoint_groups_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.NetworkEndpointGroupsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_network_endpoint_groups_host_no_port(): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_network_endpoint_groups_host_with_port(): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = NetworkEndpointGroupsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = NetworkEndpointGroupsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NetworkEndpointGroupsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = NetworkEndpointGroupsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = NetworkEndpointGroupsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NetworkEndpointGroupsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = NetworkEndpointGroupsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = NetworkEndpointGroupsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = NetworkEndpointGroupsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = NetworkEndpointGroupsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = NetworkEndpointGroupsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NetworkEndpointGroupsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = NetworkEndpointGroupsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = NetworkEndpointGroupsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = NetworkEndpointGroupsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.NetworkEndpointGroupsTransport, '_prep_wrapped_messages') as prep: + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.NetworkEndpointGroupsTransport, '_prep_wrapped_messages') as prep: + transport_class = NetworkEndpointGroupsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = NetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = NetworkEndpointGroupsClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_networks.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_networks.py new file mode 100644 index 000000000..76b7c4a27 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_networks.py @@ -0,0 +1,2286 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.networks import NetworksClient
+from google.cloud.compute_v1.services.networks import pagers
+from google.cloud.compute_v1.services.networks import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+# Stand-in mTLS client certificate callback used throughout these tests.
+# Returns a static (cert_bytes, key_bytes) pair so tests can exercise the
+# client_cert_source plumbing without provisioning real certificates.
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NetworksClient._get_default_mtls_endpoint(None) is None + assert NetworksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert NetworksClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert NetworksClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert NetworksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert NetworksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + NetworksClient, +]) +def test_networks_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.NetworksRestTransport, "rest"), +]) +def test_networks_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + NetworksClient, +]) +def test_networks_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_networks_client_get_transport_class(): + transport = NetworksClient.get_transport_class() + available_transports = [ + transports.NetworksRestTransport, + ] + assert transport in available_transports + + transport = NetworksClient.get_transport_class("rest") + assert transport == transports.NetworksRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NetworksClient, transports.NetworksRestTransport, "rest"), +]) +@mock.patch.object(NetworksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NetworksClient)) +def test_networks_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(NetworksClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NetworksClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (NetworksClient, transports.NetworksRestTransport, "rest", "true"), + (NetworksClient, transports.NetworksRestTransport, "rest", "false"), +]) +@mock.patch.object(NetworksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NetworksClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_networks_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NetworksClient, transports.NetworksRestTransport, "rest"), +]) +def test_networks_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NetworksClient, transports.NetworksRestTransport, "rest"), +]) +def test_networks_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_peering_rest(transport: str = 'rest', request_type=compute.AddPeeringNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["networks_add_peering_request_resource"] = compute.NetworksAddPeeringRequest(auto_create_routes=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_peering(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_peering_rest_bad_request(transport: str = 'rest', request_type=compute.AddPeeringNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["networks_add_peering_request_resource"] = compute.NetworksAddPeeringRequest(auto_create_routes=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_peering(request) + + +def test_add_peering_rest_from_dict(): + test_add_peering_rest(request_type=dict) + + +def test_add_peering_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + networks_add_peering_request_resource=compute.NetworksAddPeeringRequest(auto_create_routes=True), + ) + mock_args.update(sample_request) + client.add_peering(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}/addPeering" % client.transport._host, args[1]) + + +def test_add_peering_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_peering( + compute.AddPeeringNetworkRequest(), + project='project_value', + network='network_value', + networks_add_peering_request_resource=compute.NetworksAddPeeringRequest(auto_create_routes=True), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteNetworkRequest(), + project='project_value', + network='network_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Network( + I_pv4_range='I_pv4_range_value', + auto_create_subnetworks=True, + creation_timestamp='creation_timestamp_value', + description='description_value', + gateway_i_pv4='gateway_i_pv4_value', + id=205, + kind='kind_value', + mtu=342, + name='name_value', + self_link='self_link_value', + subnetworks=['subnetworks_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Network.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Network) + assert response.I_pv4_range == 'I_pv4_range_value' + assert response.auto_create_subnetworks is True + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.gateway_i_pv4 == 'gateway_i_pv4_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.mtu == 342 + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.subnetworks == ['subnetworks_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Network() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Network.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetNetworkRequest(), + project='project_value', + network='network_value', + ) + + +def test_get_effective_firewalls_rest(transport: str = 'rest', request_type=compute.GetEffectiveFirewallsNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworksGetEffectiveFirewallsResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworksGetEffectiveFirewallsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_effective_firewalls(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.NetworksGetEffectiveFirewallsResponse) + + +def test_get_effective_firewalls_rest_bad_request(transport: str = 'rest', request_type=compute.GetEffectiveFirewallsNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_effective_firewalls(request) + + +def test_get_effective_firewalls_rest_from_dict(): + test_get_effective_firewalls_rest(request_type=dict) + + +def test_get_effective_firewalls_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworksGetEffectiveFirewallsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworksGetEffectiveFirewallsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + ) + mock_args.update(sample_request) + client.get_effective_firewalls(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}/getEffectiveFirewalls" % client.transport._host, args[1]) + + +def test_get_effective_firewalls_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_effective_firewalls( + compute.GetEffectiveFirewallsNetworkRequest(), + project='project_value', + network='network_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["network_resource"] = compute.Network(I_pv4_range='I_pv4_range_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["network_resource"] = compute.Network(I_pv4_range='I_pv4_range_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network_resource=compute.Network(I_pv4_range='I_pv4_range_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertNetworkRequest(), + project='project_value', + network_resource=compute.Network(I_pv4_range='I_pv4_range_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListNetworksRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListNetworksRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListNetworksRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NetworkList( + items=[ + compute.Network(), + compute.Network(), + compute.Network(), + ], + next_page_token='abc', + ), + compute.NetworkList( + items=[], + next_page_token='def', + ), + compute.NetworkList( + items=[ + compute.Network(), + ], + next_page_token='ghi', + ), + compute.NetworkList( + items=[ + compute.Network(), + compute.Network(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NetworkList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Network) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_peering_routes_rest(transport: str = 'rest', request_type=compute.ListPeeringRoutesNetworksRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding 
+ request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ExchangedPeeringRoutesList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ExchangedPeeringRoutesList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_peering_routes(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPeeringRoutesPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_peering_routes_rest_bad_request(transport: str = 'rest', request_type=compute.ListPeeringRoutesNetworksRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_peering_routes(request) + + +def test_list_peering_routes_rest_from_dict(): + test_list_peering_routes_rest(request_type=dict) + + +def test_list_peering_routes_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ExchangedPeeringRoutesList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ExchangedPeeringRoutesList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + ) + mock_args.update(sample_request) + client.list_peering_routes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}/listPeeringRoutes" % client.transport._host, args[1]) + + +def test_list_peering_routes_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_peering_routes( + compute.ListPeeringRoutesNetworksRequest(), + project='project_value', + network='network_value', + ) + + +def test_list_peering_routes_rest_pager(): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ExchangedPeeringRoutesList( + items=[ + compute.ExchangedPeeringRoute(), + compute.ExchangedPeeringRoute(), + compute.ExchangedPeeringRoute(), + ], + next_page_token='abc', + ), + compute.ExchangedPeeringRoutesList( + items=[], + next_page_token='def', + ), + compute.ExchangedPeeringRoutesList( + items=[ + compute.ExchangedPeeringRoute(), + ], + next_page_token='ghi', + ), + compute.ExchangedPeeringRoutesList( + items=[ + compute.ExchangedPeeringRoute(), + compute.ExchangedPeeringRoute(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ExchangedPeeringRoutesList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "network": "sample2"} + + pager = client.list_peering_routes(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.ExchangedPeeringRoute) + for i in results) + + pages = list(client.list_peering_routes(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["network_resource"] = compute.Network(I_pv4_range='I_pv4_range_value') + request = request_type(request_init) + + # Mock the http request call within 
the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["network_resource"] = compute.Network(I_pv4_range='I_pv4_range_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + network_resource=compute.Network(I_pv4_range='I_pv4_range_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchNetworkRequest(), + project='project_value', + network='network_value', + network_resource=compute.Network(I_pv4_range='I_pv4_range_value'), + ) + + +def test_remove_peering_rest(transport: str = 'rest', request_type=compute.RemovePeeringNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["networks_remove_peering_request_resource"] = compute.NetworksRemovePeeringRequest(name='name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_peering(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_peering_rest_bad_request(transport: str = 'rest', request_type=compute.RemovePeeringNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["networks_remove_peering_request_resource"] = compute.NetworksRemovePeeringRequest(name='name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_peering(request) + + +def test_remove_peering_rest_from_dict(): + test_remove_peering_rest(request_type=dict) + + +def test_remove_peering_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + networks_remove_peering_request_resource=compute.NetworksRemovePeeringRequest(name='name_value'), + ) + mock_args.update(sample_request) + client.remove_peering(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}/removePeering" % client.transport._host, args[1]) + + +def test_remove_peering_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_peering( + compute.RemovePeeringNetworkRequest(), + project='project_value', + network='network_value', + networks_remove_peering_request_resource=compute.NetworksRemovePeeringRequest(name='name_value'), + ) + + +def test_switch_to_custom_mode_rest(transport: str = 'rest', request_type=compute.SwitchToCustomModeNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.switch_to_custom_mode(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_switch_to_custom_mode_rest_bad_request(transport: str = 'rest', request_type=compute.SwitchToCustomModeNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.switch_to_custom_mode(request) + + +def test_switch_to_custom_mode_rest_from_dict(): + test_switch_to_custom_mode_rest(request_type=dict) + + +def test_switch_to_custom_mode_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + ) + mock_args.update(sample_request) + client.switch_to_custom_mode(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}/switchToCustomMode" % client.transport._host, args[1]) + + +def test_switch_to_custom_mode_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.switch_to_custom_mode( + compute.SwitchToCustomModeNetworkRequest(), + project='project_value', + network='network_value', + ) + + +def test_update_peering_rest(transport: str = 'rest', request_type=compute.UpdatePeeringNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["networks_update_peering_request_resource"] = compute.NetworksUpdatePeeringRequest(network_peering=compute.NetworkPeering(auto_create_routes=True)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_peering(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_peering_rest_bad_request(transport: str = 'rest', request_type=compute.UpdatePeeringNetworkRequest): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "network": "sample2"} + request_init["networks_update_peering_request_resource"] = compute.NetworksUpdatePeeringRequest(network_peering=compute.NetworkPeering(auto_create_routes=True)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_peering(request) + + +def test_update_peering_rest_from_dict(): + test_update_peering_rest(request_type=dict) + + +def test_update_peering_rest_flattened(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "network": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + network='network_value', + networks_update_peering_request_resource=compute.NetworksUpdatePeeringRequest(network_peering=compute.NetworkPeering(auto_create_routes=True)), + ) + mock_args.update(sample_request) + client.update_peering(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/networks/{network}/updatePeering" % client.transport._host, args[1]) + + +def test_update_peering_rest_flattened_error(transport: str = 'rest'): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_peering( + compute.UpdatePeeringNetworkRequest(), + project='project_value', + network='network_value', + networks_update_peering_request_resource=compute.NetworksUpdatePeeringRequest(network_peering=compute.NetworkPeering(auto_create_routes=True)), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.NetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.NetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NetworksClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.NetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NetworksClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.NetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = NetworksClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.NetworksRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_networks_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.NetworksTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_networks_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.networks.transports.NetworksTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.NetworksTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'add_peering', + 'delete', + 'get', + 'get_effective_firewalls', + 'insert', + 'list', + 'list_peering_routes', + 'patch', + 'remove_peering', + 'switch_to_custom_mode', + 'update_peering', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_networks_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.networks.transports.NetworksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NetworksTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_networks_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.networks.transports.NetworksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NetworksTransport() + adc.assert_called_once() + + +def test_networks_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NetworksClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_networks_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.NetworksRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_networks_host_no_port(): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_networks_host_with_port(): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = NetworksClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = NetworksClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NetworksClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = NetworksClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = NetworksClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NetworksClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = NetworksClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = NetworksClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = NetworksClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = NetworksClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = NetworksClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NetworksClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = NetworksClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = NetworksClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = NetworksClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.NetworksTransport, '_prep_wrapped_messages') as prep: + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.NetworksTransport, '_prep_wrapped_messages') as prep: + transport_class = NetworksClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = NetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_groups.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_groups.py new file mode 100644 index 000000000..508d5632b --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_groups.py @@ -0,0 +1,2585 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.node_groups import NodeGroupsClient +from google.cloud.compute_v1.services.node_groups import pagers +from google.cloud.compute_v1.services.node_groups import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NodeGroupsClient._get_default_mtls_endpoint(None) is None + assert NodeGroupsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert NodeGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert NodeGroupsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert NodeGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert NodeGroupsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + NodeGroupsClient, +]) +def test_node_groups_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.NodeGroupsRestTransport, "rest"), +]) +def test_node_groups_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + NodeGroupsClient, +]) +def test_node_groups_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_node_groups_client_get_transport_class(): + transport = NodeGroupsClient.get_transport_class() + available_transports = [ + transports.NodeGroupsRestTransport, + ] + assert transport in available_transports + + transport = NodeGroupsClient.get_transport_class("rest") + assert transport == transports.NodeGroupsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NodeGroupsClient, transports.NodeGroupsRestTransport, "rest"), +]) +@mock.patch.object(NodeGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NodeGroupsClient)) +def test_node_groups_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(NodeGroupsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NodeGroupsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
    # (tail of the preceding client-options test, which starts before this chunk)
    # GOOGLE_API_USE_MTLS_ENDPOINT="always" must select the mTLS endpoint even
    # though no client certificate is configured.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class()

    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
        with pytest.raises(ValueError):
            client = client_class()

    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
    (NodeGroupsClient, transports.NodeGroupsRestTransport, "rest", "true"),
    (NodeGroupsClient, transports.NodeGroupsRestTransport, "rest", "false"),
])
@mock.patch.object(NodeGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NodeGroupsClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_node_groups_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    """With GOOGLE_API_USE_MTLS_ENDPOINT="auto", the client switches to the mTLS
    endpoint exactly when GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" AND a
    client certificate (explicit or ADC-provided) is available.
    """
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name, client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    # NOTE(review): `client` here still refers to the instance created in
                    # the previous section; only `expected_host` is read from it, which is
                    # a class-level constant, so this works — but it is fragile.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (NodeGroupsClient, transports.NodeGroupsRestTransport, "rest"),
])
def test_node_groups_client_client_options_scopes(client_class, transport_class, transport_name):
    """Scopes passed via ClientOptions are forwarded to the transport constructor."""
    # Check the case scopes are provided.
    options = client_options.ClientOptions(
        scopes=["1", "2"],
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=["1", "2"],
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )

@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (NodeGroupsClient, transports.NodeGroupsRestTransport, "rest"),
])
def test_node_groups_client_client_options_credentials_file(client_class, transport_class, transport_name):
    """A credentials file path given via ClientOptions is forwarded to the transport."""
    # Check the case credentials file is provided.
    options = client_options.ClientOptions(
        credentials_file="credentials.json"
    )
    with mock.patch.object(transport_class, '__init__') as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file="credentials.json",
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
        )


def test_add_nodes_rest(transport: str = 'rest', request_type=compute.AddNodesNodeGroupRequest):
    """add_nodes over REST returns a compute.Operation parsed from the mocked HTTP response."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request_init["node_groups_add_nodes_request_resource"] = compute.NodeGroupsAddNodesRequest(additional_node_count=2214)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.add_nodes(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_add_nodes_rest_bad_request(transport: str = 'rest', request_type=compute.AddNodesNodeGroupRequest):
    """A 400 HTTP response from add_nodes surfaces as core_exceptions.BadRequest."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request_init["node_groups_add_nodes_request_resource"] = compute.NodeGroupsAddNodesRequest(additional_node_count=2214)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.add_nodes(request)


def test_add_nodes_rest_from_dict():
    """The request may also be supplied as a plain dict instead of a proto message."""
    test_add_nodes_rest(request_type=dict)


def test_add_nodes_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments for add_nodes are transcoded onto the expected URL."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
            node_groups_add_nodes_request_resource=compute.NodeGroupsAddNodesRequest(additional_node_count=2214),
        )
        mock_args.update(sample_request)
        client.add_nodes(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/addNodes" % client.transport._host, args[1])


def test_add_nodes_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.add_nodes(
            compute.AddNodesNodeGroupRequest(),
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
            node_groups_add_nodes_request_resource=compute.NodeGroupsAddNodesRequest(additional_node_count=2214),
        )


def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListNodeGroupsRequest):
    """aggregated_list over REST wraps the mocked response in an AggregatedListPager."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeGroupAggregatedList(
            id='id_value',
            kind='kind_value',
            next_page_token='next_page_token_value',
            self_link='self_link_value',
            unreachables=['unreachables_value'],
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeGroupAggregatedList.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.aggregated_list(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.AggregatedListPager)
    assert response.id == 'id_value'
    assert response.kind == 'kind_value'
    assert response.next_page_token == 'next_page_token_value'
    assert response.self_link == 'self_link_value'
    assert response.unreachables == ['unreachables_value']


def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListNodeGroupsRequest):
    """A 400 HTTP response from aggregated_list surfaces as core_exceptions.BadRequest."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.aggregated_list(request)


def test_aggregated_list_rest_from_dict():
    """The request may also be supplied as a plain dict instead of a proto message."""
    test_aggregated_list_rest(request_type=dict)


def test_aggregated_list_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments for aggregated_list are transcoded onto the expected URL."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeGroupAggregatedList()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeGroupAggregatedList.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
        )
        mock_args.update(sample_request)
        client.aggregated_list(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/nodeGroups" % client.transport._host, args[1])


def test_aggregated_list_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.aggregated_list(
            compute.AggregatedListNodeGroupsRequest(),
            project='project_value',
        )


def test_aggregated_list_rest_pager():
    """aggregated_list pages lazily through (scope-name, NodeGroupsScopedList) items
    across multiple mocked responses, and `.get()` reflects only the current page.
    """
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        #with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.NodeGroupAggregatedList(
                items={
                    'a':compute.NodeGroupsScopedList(),
                    'b':compute.NodeGroupsScopedList(),
                    'c':compute.NodeGroupsScopedList(),
                },
                next_page_token='abc',
            ),
            compute.NodeGroupAggregatedList(
                items={},
                next_page_token='def',
            ),
            compute.NodeGroupAggregatedList(
                items={
                    'g':compute.NodeGroupsScopedList(),
                },
                next_page_token='ghi',
            ),
            compute.NodeGroupAggregatedList(
                items={
                    'h':compute.NodeGroupsScopedList(),
                    'i':compute.NodeGroupsScopedList(),
                },
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.NodeGroupAggregatedList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1"}

        pager = client.aggregated_list(request=sample_request)

        # Before iterating, the pager sits on the first page: 'a' is present, 'h' is not.
        assert isinstance(pager.get('a'), compute.NodeGroupsScopedList)
        assert pager.get('h') is None

        results = list(pager)
        assert len(results) == 6
        assert all(
            isinstance(i, tuple)
            for i in results)
        for result in results:
            assert isinstance(result, tuple)
            assert tuple(type(t) for t in result) == (str, compute.NodeGroupsScopedList)

        # After exhaustion, the pager sits on the last page: 'a' is gone, 'h' is present.
        assert pager.get('a') is None
        assert isinstance(pager.get('h'), compute.NodeGroupsScopedList)

        pages = list(client.aggregated_list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteNodeGroupRequest):
    """delete over REST returns a compute.Operation parsed from the mocked HTTP response."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.delete(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteNodeGroupRequest):
    """A 400 HTTP response from delete surfaces as core_exceptions.BadRequest."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.delete(request)


def test_delete_rest_from_dict():
    """The request may also be supplied as a plain dict instead of a proto message."""
    test_delete_rest(request_type=dict)


def test_delete_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments for delete are transcoded onto the expected URL."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
        )
        mock_args.update(sample_request)
        client.delete(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}" % client.transport._host, args[1])


def test_delete_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete(
            compute.DeleteNodeGroupRequest(),
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
        )


def test_delete_nodes_rest(transport: str = 'rest', request_type=compute.DeleteNodesNodeGroupRequest):
    """delete_nodes over REST returns a compute.Operation parsed from the mocked HTTP response."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request_init["node_groups_delete_nodes_request_resource"] = compute.NodeGroupsDeleteNodesRequest(nodes=['nodes_value'])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.delete_nodes(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_delete_nodes_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteNodesNodeGroupRequest):
    """A 400 HTTP response from delete_nodes surfaces as core_exceptions.BadRequest."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request_init["node_groups_delete_nodes_request_resource"] = compute.NodeGroupsDeleteNodesRequest(nodes=['nodes_value'])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.delete_nodes(request)


def test_delete_nodes_rest_from_dict():
    """The request may also be supplied as a plain dict instead of a proto message."""
    test_delete_nodes_rest(request_type=dict)


def test_delete_nodes_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments for delete_nodes are transcoded onto the expected URL."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
            node_groups_delete_nodes_request_resource=compute.NodeGroupsDeleteNodesRequest(nodes=['nodes_value']),
        )
        mock_args.update(sample_request)
        client.delete_nodes(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/deleteNodes" % client.transport._host, args[1])


def test_delete_nodes_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_nodes(
            compute.DeleteNodesNodeGroupRequest(),
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
            node_groups_delete_nodes_request_resource=compute.NodeGroupsDeleteNodesRequest(nodes=['nodes_value']),
        )


def test_get_rest(transport: str = 'rest', request_type=compute.GetNodeGroupRequest):
    """get over REST returns a compute.NodeGroup parsed from the mocked HTTP response."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeGroup(
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            fingerprint='fingerprint_value',
            id=205,
            kind='kind_value',
            location_hint='location_hint_value',
            maintenance_policy='maintenance_policy_value',
            name='name_value',
            node_template='node_template_value',
            self_link='self_link_value',
            size=443,
            status='status_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeGroup.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.NodeGroup)
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.fingerprint == 'fingerprint_value'
    assert response.id == 205
    assert response.kind == 'kind_value'
    assert response.location_hint == 'location_hint_value'
    assert response.maintenance_policy == 'maintenance_policy_value'
    assert response.name == 'name_value'
    assert response.node_template == 'node_template_value'
    assert response.self_link == 'self_link_value'
    assert response.size == 443
    assert response.status == 'status_value'
    assert response.zone == 'zone_value'


def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetNodeGroupRequest):
    """A 400 HTTP response from get surfaces as core_exceptions.BadRequest."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)


def test_get_rest_from_dict():
    """The request may also be supplied as a plain dict instead of a proto message."""
    test_get_rest(request_type=dict)


def test_get_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments for get are transcoded onto the expected URL."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeGroup()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeGroup.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
        )
        mock_args.update(sample_request)
        client.get(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}" % client.transport._host, args[1])


def test_get_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get(
            compute.GetNodeGroupRequest(),
            project='project_value',
            zone='zone_value',
            node_group='node_group_value',
        )


def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyNodeGroupRequest):
    """get_iam_policy over REST returns a compute.Policy parsed from the mocked HTTP response."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy(
            etag='etag_value',
            iam_owned=True,
            version=774,
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get_iam_policy(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Policy)
    assert response.etag == 'etag_value'
    assert response.iam_owned is True
    assert response.version == 774


def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyNodeGroupRequest):
    """A 400 HTTP response from get_iam_policy surfaces as core_exceptions.BadRequest."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get_iam_policy(request)


def test_get_iam_policy_rest_from_dict():
    """The request may also be supplied as a plain dict instead of a proto message."""
    test_get_iam_policy_rest(request_type=dict)


def test_get_iam_policy_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments for get_iam_policy are transcoded onto the expected URL."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            resource='resource_value',
        )
        mock_args.update(sample_request)
        client.get_iam_policy(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{resource}/getIamPolicy" % client.transport._host, args[1])


def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_iam_policy(
            compute.GetIamPolicyNodeGroupRequest(),
            project='project_value',
            zone='zone_value',
            resource='resource_value',
        )


def test_insert_rest(transport: str = 'rest', request_type=compute.InsertNodeGroupRequest):
    """insert over REST (continues past this chunk)."""
    client = NodeGroupsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2"}
    request_init["node_group_resource"] = compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958))
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["node_group_resource"] = compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + initial_node_count=1911, + node_group_resource=compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958)), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertNodeGroupRequest(), + project='project_value', + zone='zone_value', + initial_node_count=1911, + node_group_resource=compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958)), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListNodeGroupsRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NodeGroupList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeGroupList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListNodeGroupsRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NodeGroupList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeGroupList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListNodeGroupsRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NodeGroupList( + items=[ + compute.NodeGroup(), + compute.NodeGroup(), + compute.NodeGroup(), + ], + next_page_token='abc', + ), + compute.NodeGroupList( + items=[], + next_page_token='def', + ), + compute.NodeGroupList( + items=[ + compute.NodeGroup(), + ], + next_page_token='ghi', + ), + compute.NodeGroupList( + items=[ + compute.NodeGroup(), + compute.NodeGroup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NodeGroupList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NodeGroup) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_nodes_rest(transport: str = 'rest', request_type=compute.ListNodesNodeGroupsRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NodeGroupsListNodes( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeGroupsListNodes.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_nodes(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListNodesPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_nodes_rest_bad_request(transport: str = 'rest', request_type=compute.ListNodesNodeGroupsRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_nodes(request) + + +def test_list_nodes_rest_from_dict(): + test_list_nodes_rest(request_type=dict) + + +def test_list_nodes_rest_flattened(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NodeGroupsListNodes() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeGroupsListNodes.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + node_group='node_group_value', + ) + mock_args.update(sample_request) + client.list_nodes(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/listNodes" % client.transport._host, args[1]) + + +def test_list_nodes_rest_flattened_error(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_nodes( + compute.ListNodesNodeGroupsRequest(), + project='project_value', + zone='zone_value', + node_group='node_group_value', + ) + + +def test_list_nodes_rest_pager(): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NodeGroupsListNodes( + items=[ + compute.NodeGroupNode(), + compute.NodeGroupNode(), + compute.NodeGroupNode(), + ], + next_page_token='abc', + ), + compute.NodeGroupsListNodes( + items=[], + next_page_token='def', + ), + compute.NodeGroupsListNodes( + items=[ + compute.NodeGroupNode(), + ], + next_page_token='ghi', + ), + compute.NodeGroupsListNodes( + items=[ + compute.NodeGroupNode(), + compute.NodeGroupNode(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NodeGroupsListNodes.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + + pager = client.list_nodes(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NodeGroupNode) + for i in results) + + pages = list(client.list_nodes(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + request_init["node_group_resource"] = compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a 
response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + request_init["node_group_resource"] = compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + node_group='node_group_value', + node_group_resource=compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958)), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchNodeGroupRequest(), + project='project_value', + zone='zone_value', + node_group='node_group_value', + node_group_resource=compute.NodeGroup(autoscaling_policy=compute.NodeGroupAutoscalingPolicy(max_nodes=958)), + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyNodeGroupRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_set_node_template_rest(transport: str = 'rest', request_type=compute.SetNodeTemplateNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + request_init["node_groups_set_node_template_request_resource"] = compute.NodeGroupsSetNodeTemplateRequest(node_template='node_template_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_node_template(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_node_template_rest_bad_request(transport: str = 'rest', request_type=compute.SetNodeTemplateNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + request_init["node_groups_set_node_template_request_resource"] = compute.NodeGroupsSetNodeTemplateRequest(node_template='node_template_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_node_template(request) + + +def test_set_node_template_rest_from_dict(): + test_set_node_template_rest(request_type=dict) + + +def test_set_node_template_rest_flattened(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "node_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + node_group='node_group_value', + node_groups_set_node_template_request_resource=compute.NodeGroupsSetNodeTemplateRequest(node_template='node_template_value'), + ) + mock_args.update(sample_request) + client.set_node_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{node_group}/setNodeTemplate" % client.transport._host, args[1]) + + +def test_set_node_template_rest_flattened_error(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_node_template( + compute.SetNodeTemplateNodeGroupRequest(), + project='project_value', + zone='zone_value', + node_group='node_group_value', + node_groups_set_node_template_request_resource=compute.NodeGroupsSetNodeTemplateRequest(node_template='node_template_value'), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsNodeGroupRequest): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeGroups/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsNodeGroupRequest(), + project='project_value', + zone='zone_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+    transport = transports.NodeGroupsRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        NodeGroupsClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.NodeGroupsRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        NodeGroupsClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.NodeGroupsRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        NodeGroupsClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.NodeGroupsRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = NodeGroupsClient(transport=transport)
+    assert client.transport is transport
+
+
+@pytest.mark.parametrize("transport_class", [
+    transports.NodeGroupsRestTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+
+def test_node_groups_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transports.NodeGroupsTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_node_groups_base_transport():
+    # Instantiate the base transport.
+ with mock.patch('google.cloud.compute_v1.services.node_groups.transports.NodeGroupsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.NodeGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'add_nodes', + 'aggregated_list', + 'delete', + 'delete_nodes', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'list_nodes', + 'patch', + 'set_iam_policy', + 'set_node_template', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_node_groups_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.node_groups.transports.NodeGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeGroupsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_node_groups_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.node_groups.transports.NodeGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeGroupsTransport() + adc.assert_called_once() + + +def test_node_groups_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NodeGroupsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_node_groups_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.NodeGroupsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_node_groups_host_no_port(): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_node_groups_host_with_port(): + client = NodeGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = NodeGroupsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = NodeGroupsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = NodeGroupsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = NodeGroupsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = NodeGroupsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NodeGroupsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = NodeGroupsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = NodeGroupsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = NodeGroupsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = NodeGroupsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = NodeGroupsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+    actual = NodeGroupsClient.parse_common_project_path(path)
+    assert expected == actual
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = NodeGroupsClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = NodeGroupsClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = NodeGroupsClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.NodeGroupsTransport, '_prep_wrapped_messages') as prep:
+        client = NodeGroupsClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.NodeGroupsTransport, '_prep_wrapped_messages') as prep:
+        transport_class = NodeGroupsClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+
+def test_transport_close():
+    transports = {
+        "rest": "_session",
+    }
+
+    for transport, close_name in transports.items():
+        client = NodeGroupsClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+def test_client_ctx():
+    transports = [
+        'rest',
+    ]
+    for transport in transports:
+        client = NodeGroupsClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_templates.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_templates.py new file mode 100644 index 000000000..7245c598c --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_templates.py @@ -0,0 +1,1778 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.node_templates import NodeTemplatesClient +from google.cloud.compute_v1.services.node_templates import pagers +from google.cloud.compute_v1.services.node_templates import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NodeTemplatesClient._get_default_mtls_endpoint(None) is None + assert NodeTemplatesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert NodeTemplatesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert NodeTemplatesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert NodeTemplatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert NodeTemplatesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + NodeTemplatesClient, +]) +def test_node_templates_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.NodeTemplatesRestTransport, "rest"), +]) +def test_node_templates_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + NodeTemplatesClient, +]) +def test_node_templates_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_node_templates_client_get_transport_class(): + transport = NodeTemplatesClient.get_transport_class() + available_transports = [ + transports.NodeTemplatesRestTransport, + ] + assert transport in available_transports + + transport = NodeTemplatesClient.get_transport_class("rest") + assert transport == transports.NodeTemplatesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NodeTemplatesClient, transports.NodeTemplatesRestTransport, "rest"), +]) +@mock.patch.object(NodeTemplatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NodeTemplatesClient)) +def test_node_templates_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(NodeTemplatesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NodeTemplatesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (NodeTemplatesClient, transports.NodeTemplatesRestTransport, "rest", "true"), + (NodeTemplatesClient, transports.NodeTemplatesRestTransport, "rest", "false"), +]) +@mock.patch.object(NodeTemplatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NodeTemplatesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": 
"auto"}) +def test_node_templates_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NodeTemplatesClient, transports.NodeTemplatesRestTransport, "rest"), +]) +def test_node_templates_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NodeTemplatesClient, transports.NodeTemplatesRestTransport, "rest"), +]) +def test_node_templates_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListNodeTemplatesRequest): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NodeTemplateAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeTemplateAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListNodeTemplatesRequest): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.aggregated_list(request)


def test_aggregated_list_rest_from_dict():
    """Re-run the aggregated_list test passing the request as a plain dict."""
    test_aggregated_list_rest(request_type=dict)


def test_aggregated_list_rest_flattened(transport: str = 'rest'):
    """Verify flattened (keyword-argument) calls build the expected HTTP request URL."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeTemplateAggregatedList()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeTemplateAggregatedList.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
        )
        mock_args.update(sample_request)
        client.aggregated_list(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/nodeTemplates" % client.transport._host, args[1])


def test_aggregated_list_rest_flattened_error(transport: str = 'rest'):
    """Verify mixing a request object with flattened fields raises ValueError."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.aggregated_list(
            compute.AggregatedListNodeTemplatesRequest(),
            project='project_value',
        )


def test_aggregated_list_rest_pager():
    """Verify the aggregated-list pager walks all pages and exposes dict-style access."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        #with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.NodeTemplateAggregatedList(
                items={
                    'a':compute.NodeTemplatesScopedList(),
                    'b':compute.NodeTemplatesScopedList(),
                    'c':compute.NodeTemplatesScopedList(),
                },
                next_page_token='abc',
            ),
            compute.NodeTemplateAggregatedList(
                items={},
                next_page_token='def',
            ),
            compute.NodeTemplateAggregatedList(
                items={
                    'g':compute.NodeTemplatesScopedList(),
                },
                next_page_token='ghi',
            ),
            compute.NodeTemplateAggregatedList(
                items={
                    'h':compute.NodeTemplatesScopedList(),
                    'i':compute.NodeTemplatesScopedList(),
                },
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.NodeTemplateAggregatedList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1"}

        pager = client.aggregated_list(request=sample_request)

        # Before iterating, the pager exposes only the first page's keys.
        assert isinstance(pager.get('a'), compute.NodeTemplatesScopedList)
        assert pager.get('h') is None

        results = list(pager)
        assert len(results) == 6
        assert all(
            isinstance(i, tuple)
            for i in results)
        for result in results:
            assert isinstance(result, tuple)
            assert tuple(type(t) for t in result) == (str, compute.NodeTemplatesScopedList)

        # After iterating, the pager has advanced to the last page.
        assert pager.get('a') is None
        assert isinstance(pager.get('h'), compute.NodeTemplatesScopedList)

        pages = list(client.aggregated_list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteNodeTemplateRequest):
    """Exercise `delete` over REST with a mocked HTTP session and check the Operation response."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "node_template": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.delete(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteNodeTemplateRequest):
    """Verify that a 400 HTTP response from `delete` surfaces as `core_exceptions.BadRequest`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "node_template": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.delete(request)


def test_delete_rest_from_dict():
    """Re-run the delete test passing the request as a plain dict."""
    test_delete_rest(request_type=dict)


def test_delete_rest_flattened(transport: str = 'rest'):
    """Verify flattened `delete` calls build the expected HTTP request URL."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "node_template": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            node_template='node_template_value',
        )
        mock_args.update(sample_request)
        client.delete(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{node_template}" % client.transport._host, args[1])


def test_delete_rest_flattened_error(transport: str = 'rest'):
    """Verify mixing a request object with flattened fields raises ValueError for `delete`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete(
            compute.DeleteNodeTemplateRequest(),
            project='project_value',
            region='region_value',
            node_template='node_template_value',
        )


def test_get_rest(transport: str = 'rest', request_type=compute.GetNodeTemplateRequest):
    """Exercise `get` over REST with a mocked HTTP session and check the NodeTemplate response."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "node_template": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeTemplate(
            cpu_overcommit_type='cpu_overcommit_type_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            id=205,
            kind='kind_value',
            name='name_value',
            node_type='node_type_value',
            region='region_value',
            self_link='self_link_value',
            status='status_value',
            status_message='status_message_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeTemplate.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.NodeTemplate)
    assert response.cpu_overcommit_type == 'cpu_overcommit_type_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.id == 205
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.node_type == 'node_type_value'
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.status == 'status_value'
    assert response.status_message == 'status_message_value'


def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetNodeTemplateRequest):
    """Verify that a 400 HTTP response from `get` surfaces as `core_exceptions.BadRequest`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "node_template": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)


def test_get_rest_from_dict():
    """Re-run the get test passing the request as a plain dict."""
    test_get_rest(request_type=dict)


def test_get_rest_flattened(transport: str = 'rest'):
    """Verify flattened `get` calls build the expected HTTP request URL."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeTemplate()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeTemplate.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "node_template": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            node_template='node_template_value',
        )
        mock_args.update(sample_request)
        client.get(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{node_template}" % client.transport._host, args[1])


def test_get_rest_flattened_error(transport: str = 'rest'):
    """Verify mixing a request object with flattened fields raises ValueError for `get`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get(
            compute.GetNodeTemplateRequest(),
            project='project_value',
            region='region_value',
            node_template='node_template_value',
        )


def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyNodeTemplateRequest):
    """Exercise `get_iam_policy` over REST with a mocked HTTP session and check the Policy response."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy(
            etag='etag_value',
            iam_owned=True,
            version=774,
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get_iam_policy(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Policy)
    assert response.etag == 'etag_value'
    assert response.iam_owned is True
    assert response.version == 774


def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyNodeTemplateRequest):
    """Verify that a 400 HTTP response from `get_iam_policy` surfaces as BadRequest."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get_iam_policy(request)


def test_get_iam_policy_rest_from_dict():
    """Re-run the get_iam_policy test passing the request as a plain dict."""
    test_get_iam_policy_rest(request_type=dict)


def test_get_iam_policy_rest_flattened(transport: str = 'rest'):
    """Verify flattened `get_iam_policy` calls build the expected HTTP request URL."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            resource='resource_value',
        )
        mock_args.update(sample_request)
        client.get_iam_policy(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{resource}/getIamPolicy" % client.transport._host, args[1])


def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'):
    """Verify mixing a request object with flattened fields raises ValueError for `get_iam_policy`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_iam_policy(
            compute.GetIamPolicyNodeTemplateRequest(),
            project='project_value',
            region='region_value',
            resource='resource_value',
        )


def test_insert_rest(transport: str = 'rest', request_type=compute.InsertNodeTemplateRequest):
    """Exercise `insert` over REST with a mocked HTTP session and check the Operation response."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    # The request body resource is supplied as a message inside the init mapping.
    request_init["node_template_resource"] = compute.NodeTemplate(accelerators=[compute.AcceleratorConfig(accelerator_count=1805)])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.insert(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertNodeTemplateRequest):
    """Verify that a 400 HTTP response from `insert` surfaces as `core_exceptions.BadRequest`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request_init["node_template_resource"] = compute.NodeTemplate(accelerators=[compute.AcceleratorConfig(accelerator_count=1805)])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.insert(request)


def test_insert_rest_from_dict():
    """Re-run the insert test passing the request as a plain dict."""
    test_insert_rest(request_type=dict)


def test_insert_rest_flattened(transport: str = 'rest'):
    """Verify flattened `insert` calls build the expected HTTP request URL."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            node_template_resource=compute.NodeTemplate(accelerators=[compute.AcceleratorConfig(accelerator_count=1805)]),
        )
        mock_args.update(sample_request)
        client.insert(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/nodeTemplates" % client.transport._host, args[1])


def test_insert_rest_flattened_error(transport: str = 'rest'):
    """Verify mixing a request object with flattened fields raises ValueError for `insert`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.insert(
            compute.InsertNodeTemplateRequest(),
            project='project_value',
            region='region_value',
            node_template_resource=compute.NodeTemplate(accelerators=[compute.AcceleratorConfig(accelerator_count=1805)]),
        )


def test_list_rest(transport: str = 'rest', request_type=compute.ListNodeTemplatesRequest):
    """Exercise `list` over REST with a mocked HTTP session and check the paged response."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeTemplateList(
            id='id_value',
            kind='kind_value',
            next_page_token='next_page_token_value',
            self_link='self_link_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeTemplateList.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.list(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPager)
    assert response.id == 'id_value'
    assert response.kind == 'kind_value'
    assert response.next_page_token == 'next_page_token_value'
    assert response.self_link == 'self_link_value'


def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListNodeTemplatesRequest):
    """Verify that a 400 HTTP response from `list` surfaces as `core_exceptions.BadRequest`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.list(request)


def test_list_rest_from_dict():
    """Re-run the list test passing the request as a plain dict."""
    test_list_rest(request_type=dict)


def test_list_rest_flattened(transport: str = 'rest'):
    """Verify flattened `list` calls build the expected HTTP request URL."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.NodeTemplateList()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.NodeTemplateList.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
        )
        mock_args.update(sample_request)
        client.list(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/nodeTemplates" % client.transport._host, args[1])


def test_list_rest_flattened_error(transport: str = 'rest'):
    """Verify mixing a request object with flattened fields raises ValueError for `list`."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list(
            compute.ListNodeTemplatesRequest(),
            project='project_value',
            region='region_value',
        )


def test_list_rest_pager():
    """Verify the list pager yields every item across pages and exposes raw page tokens."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        #with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.NodeTemplateList(
                items=[
                    compute.NodeTemplate(),
                    compute.NodeTemplate(),
                    compute.NodeTemplate(),
                ],
                next_page_token='abc',
            ),
            compute.NodeTemplateList(
                items=[],
                next_page_token='def',
            ),
            compute.NodeTemplateList(
                items=[
                    compute.NodeTemplate(),
                ],
                next_page_token='ghi',
            ),
            compute.NodeTemplateList(
                items=[
                    compute.NodeTemplate(),
                ],
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.NodeTemplateList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1", "region": "sample2"}

        pager = client.list(request=sample_request)

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.NodeTemplate)
                   for i in results)

        pages = list(client.list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyNodeTemplateRequest):
    """Exercise `set_iam_policy` over REST with a mocked HTTP session and check the Policy response."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
    # The request body resource is supplied as a message inside the init mapping.
    request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy(
            etag='etag_value',
            iam_owned=True,
            version=774,
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.set_iam_policy(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Policy)
    assert response.etag == 'etag_value'
    assert response.iam_owned is True
    assert response.version == 774


def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyNodeTemplateRequest):
    """Verify that a 400 HTTP response from `set_iam_policy` surfaces as BadRequest."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
    request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.set_iam_policy(request)


def test_set_iam_policy_rest_from_dict():
    """Re-run the set_iam_policy test passing the request as a plain dict."""
    test_set_iam_policy_rest(request_type=dict)


def test_set_iam_policy_rest_flattened(transport: str = 'rest'):
    """Verify flattened `set_iam_policy` calls build the expected HTTP request URL."""
    client = NodeTemplatesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            resource='resource_value',
            region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]),
        )
        mock_args.update(sample_request)
        client.set_iam_policy(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyNodeTemplateRequest(), + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsNodeTemplateRequest): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsNodeTemplateRequest): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/nodeTemplates/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsNodeTemplateRequest(), + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.NodeTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.NodeTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeTemplatesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.NodeTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeTemplatesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.NodeTemplatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = NodeTemplatesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.NodeTemplatesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_node_templates_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.NodeTemplatesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_node_templates_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.node_templates.transports.NodeTemplatesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.NodeTemplatesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_node_templates_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.node_templates.transports.NodeTemplatesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeTemplatesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_node_templates_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.node_templates.transports.NodeTemplatesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeTemplatesTransport() + adc.assert_called_once() + + +def test_node_templates_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NodeTemplatesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_node_templates_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.NodeTemplatesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_node_templates_host_no_port(): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_node_templates_host_with_port(): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = NodeTemplatesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = NodeTemplatesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = NodeTemplatesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = NodeTemplatesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = NodeTemplatesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NodeTemplatesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = NodeTemplatesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = NodeTemplatesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = NodeTemplatesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = NodeTemplatesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = NodeTemplatesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NodeTemplatesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = NodeTemplatesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = NodeTemplatesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = NodeTemplatesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.NodeTemplatesTransport, '_prep_wrapped_messages') as prep: + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.NodeTemplatesTransport, '_prep_wrapped_messages') as prep: + transport_class = NodeTemplatesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = NodeTemplatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying 
transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_types.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_types.py new file mode 100644 index 000000000..cf0045ed5 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_node_types.py @@ -0,0 +1,1128 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.node_types import NodeTypesClient +from google.cloud.compute_v1.services.node_types import pagers +from google.cloud.compute_v1.services.node_types import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NodeTypesClient._get_default_mtls_endpoint(None) is None + assert NodeTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert NodeTypesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert NodeTypesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert NodeTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert NodeTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + NodeTypesClient, +]) +def test_node_types_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.NodeTypesRestTransport, "rest"), +]) +def test_node_types_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + NodeTypesClient, +]) +def test_node_types_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_node_types_client_get_transport_class(): + transport = NodeTypesClient.get_transport_class() + available_transports = [ + transports.NodeTypesRestTransport, + ] + assert transport in available_transports + + transport = NodeTypesClient.get_transport_class("rest") + assert transport == transports.NodeTypesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NodeTypesClient, transports.NodeTypesRestTransport, "rest"), +]) +@mock.patch.object(NodeTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NodeTypesClient)) +def test_node_types_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(NodeTypesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NodeTypesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (NodeTypesClient, transports.NodeTypesRestTransport, "rest", "true"), + (NodeTypesClient, transports.NodeTypesRestTransport, "rest", "false"), +]) +@mock.patch.object(NodeTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(NodeTypesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_node_types_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NodeTypesClient, transports.NodeTypesRestTransport, "rest"), +]) +def test_node_types_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (NodeTypesClient, transports.NodeTypesRestTransport, "rest"), +]) +def test_node_types_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListNodeTypesRequest): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NodeTypeAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeTypeAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListNodeTypesRequest): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NodeTypeAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeTypeAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/nodeTypes" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListNodeTypesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NodeTypeAggregatedList( + items={ + 'a':compute.NodeTypesScopedList(), + 'b':compute.NodeTypesScopedList(), + 'c':compute.NodeTypesScopedList(), + }, + next_page_token='abc', + ), + compute.NodeTypeAggregatedList( + items={}, + next_page_token='def', + ), + compute.NodeTypeAggregatedList( + items={ + 'g':compute.NodeTypesScopedList(), + }, + next_page_token='ghi', + ), + compute.NodeTypeAggregatedList( + items={ + 'h':compute.NodeTypesScopedList(), + 'i':compute.NodeTypesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NodeTypeAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.NodeTypesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.NodeTypesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.NodeTypesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetNodeTypeRequest): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # 
send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NodeType( + cpu_platform='cpu_platform_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + guest_cpus=1090, + id=205, + kind='kind_value', + local_ssd_gb=1244, + memory_mb=967, + name='name_value', + self_link='self_link_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeType.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.NodeType) + assert response.cpu_platform == 'cpu_platform_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.guest_cpus == 1090 + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.local_ssd_gb == 1244 + assert response.memory_mb == 967 + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetNodeTypeRequest): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "node_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NodeType() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeType.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "node_type": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + node_type='node_type_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeTypes/{node_type}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetNodeTypeRequest(), + project='project_value', + zone='zone_value', + node_type='node_type_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListNodeTypesRequest): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NodeTypeList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeTypeList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListNodeTypesRequest): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NodeTypeList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NodeTypeList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/nodeTypes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListNodeTypesRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NodeTypeList( + items=[ + compute.NodeType(), + compute.NodeType(), + compute.NodeType(), + ], + next_page_token='abc', + ), + compute.NodeTypeList( + items=[], + next_page_token='def', + ), + compute.NodeTypeList( + items=[ + compute.NodeType(), + ], + next_page_token='ghi', + ), + compute.NodeTypeList( + items=[ + compute.NodeType(), + compute.NodeType(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NodeTypeList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NodeType) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.NodeTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.NodeTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeTypesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.NodeTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeTypesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.NodeTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = NodeTypesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.NodeTypesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_node_types_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.NodeTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_node_types_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.node_types.transports.NodeTypesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.NodeTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_node_types_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.node_types.transports.NodeTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeTypesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_node_types_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.node_types.transports.NodeTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeTypesTransport() + adc.assert_called_once() + + +def test_node_types_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NodeTypesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_node_types_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.NodeTypesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_node_types_host_no_port(): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_node_types_host_with_port(): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = NodeTypesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = NodeTypesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NodeTypesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = NodeTypesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = NodeTypesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = NodeTypesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = NodeTypesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = NodeTypesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = NodeTypesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = NodeTypesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = NodeTypesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = NodeTypesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = NodeTypesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = NodeTypesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = NodeTypesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.NodeTypesTransport, '_prep_wrapped_messages') as prep: + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.NodeTypesTransport, '_prep_wrapped_messages') as prep: + transport_class = NodeTypesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = NodeTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_packet_mirrorings.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_packet_mirrorings.py new file mode 100644 index 000000000..4c7076b15 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_packet_mirrorings.py @@ -0,0 +1,1698 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.packet_mirrorings import PacketMirroringsClient +from google.cloud.compute_v1.services.packet_mirrorings import pagers +from google.cloud.compute_v1.services.packet_mirrorings import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PacketMirroringsClient._get_default_mtls_endpoint(None) is None + assert PacketMirroringsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PacketMirroringsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PacketMirroringsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PacketMirroringsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PacketMirroringsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + PacketMirroringsClient, +]) +def test_packet_mirrorings_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PacketMirroringsRestTransport, "rest"), +]) +def test_packet_mirrorings_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + PacketMirroringsClient, +]) +def test_packet_mirrorings_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_packet_mirrorings_client_get_transport_class(): + transport = PacketMirroringsClient.get_transport_class() + available_transports = [ + transports.PacketMirroringsRestTransport, + ] + assert transport in available_transports + + transport = PacketMirroringsClient.get_transport_class("rest") + assert transport == transports.PacketMirroringsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PacketMirroringsClient, transports.PacketMirroringsRestTransport, "rest"), +]) +@mock.patch.object(PacketMirroringsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PacketMirroringsClient)) +def test_packet_mirrorings_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(PacketMirroringsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PacketMirroringsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PacketMirroringsClient, transports.PacketMirroringsRestTransport, "rest", "true"), + (PacketMirroringsClient, transports.PacketMirroringsRestTransport, "rest", "false"), +]) +@mock.patch.object(PacketMirroringsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PacketMirroringsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_packet_mirrorings_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PacketMirroringsClient, transports.PacketMirroringsRestTransport, "rest"), +]) +def test_packet_mirrorings_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PacketMirroringsClient, transports.PacketMirroringsRestTransport, "rest"), +]) +def test_packet_mirrorings_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListPacketMirroringsRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PacketMirroringAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PacketMirroringAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListPacketMirroringsRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PacketMirroringAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PacketMirroringAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/packetMirrorings" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListPacketMirroringsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.PacketMirroringAggregatedList( + items={ + 'a':compute.PacketMirroringsScopedList(), + 'b':compute.PacketMirroringsScopedList(), + 'c':compute.PacketMirroringsScopedList(), + }, + next_page_token='abc', + ), + compute.PacketMirroringAggregatedList( + items={}, + next_page_token='def', + ), + compute.PacketMirroringAggregatedList( + items={ + 'g':compute.PacketMirroringsScopedList(), + }, + next_page_token='ghi', + ), + compute.PacketMirroringAggregatedList( + items={ + 'h':compute.PacketMirroringsScopedList(), + 'i':compute.PacketMirroringsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.PacketMirroringAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): 
+ return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.PacketMirroringsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.PacketMirroringsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.PacketMirroringsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeletePacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeletePacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + packet_mirroring='packet_mirroring_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{packet_mirroring}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeletePacketMirroringRequest(), + project='project_value', + region='region_value', + packet_mirroring='packet_mirroring_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PacketMirroring( + creation_timestamp='creation_timestamp_value', + description='description_value', + enable='enable_value', + id=205, + kind='kind_value', + name='name_value', + priority=898, + region='region_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PacketMirroring.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.PacketMirroring) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.enable == 'enable_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.priority == 898 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PacketMirroring() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PacketMirroring.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + packet_mirroring='packet_mirroring_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{packet_mirroring}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetPacketMirroringRequest(), + project='project_value', + region='region_value', + packet_mirroring='packet_mirroring_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["packet_mirroring_resource"] = compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the 
type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["packet_mirroring_resource"] = compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + packet_mirroring_resource=compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/packetMirrorings" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertPacketMirroringRequest(), + project='project_value', + region='region_value', + packet_mirroring_resource=compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListPacketMirroringsRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PacketMirroringList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PacketMirroringList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListPacketMirroringsRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PacketMirroringList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PacketMirroringList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/packetMirrorings" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListPacketMirroringsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.PacketMirroringList( + items=[ + compute.PacketMirroring(), + compute.PacketMirroring(), + compute.PacketMirroring(), + ], + next_page_token='abc', + ), + compute.PacketMirroringList( + items=[], + next_page_token='def', + ), + compute.PacketMirroringList( + items=[ + compute.PacketMirroring(), + ], + next_page_token='ghi', + ), + compute.PacketMirroringList( + items=[ + compute.PacketMirroring(), + compute.PacketMirroring(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.PacketMirroringList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.PacketMirroring) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + request_init["packet_mirroring_resource"] = compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')) + request = request_type(request_init) + + # Mock the http 
request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + request_init["packet_mirroring_resource"] = compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "packet_mirroring": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + packet_mirroring='packet_mirroring_value', + packet_mirroring_resource=compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{packet_mirroring}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchPacketMirroringRequest(), + project='project_value', + region='region_value', + packet_mirroring='packet_mirroring_value', + packet_mirroring_resource=compute.PacketMirroring(collector_ilb=compute.PacketMirroringForwardingRuleInfo(canonical_url='canonical_url_value')), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsPacketMirroringRequest): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/packetMirrorings/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsPacketMirroringRequest(), + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.PacketMirroringsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PacketMirroringsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PacketMirroringsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.PacketMirroringsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PacketMirroringsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PacketMirroringsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PacketMirroringsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.PacketMirroringsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_packet_mirrorings_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PacketMirroringsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_packet_mirrorings_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.packet_mirrorings.transports.PacketMirroringsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PacketMirroringsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_packet_mirrorings_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.packet_mirrorings.transports.PacketMirroringsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PacketMirroringsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 
'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_packet_mirrorings_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.packet_mirrorings.transports.PacketMirroringsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PacketMirroringsTransport() + adc.assert_called_once() + + +def test_packet_mirrorings_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PacketMirroringsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_packet_mirrorings_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.PacketMirroringsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_packet_mirrorings_host_no_port(): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_packet_mirrorings_host_with_port(): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + 
client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PacketMirroringsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = PacketMirroringsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = PacketMirroringsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = PacketMirroringsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = PacketMirroringsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PacketMirroringsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PacketMirroringsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = PacketMirroringsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PacketMirroringsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = PacketMirroringsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = PacketMirroringsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = PacketMirroringsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PacketMirroringsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = PacketMirroringsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PacketMirroringsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PacketMirroringsTransport, '_prep_wrapped_messages') as prep: + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PacketMirroringsTransport, '_prep_wrapped_messages') as prep: + transport_class = PacketMirroringsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = PacketMirroringsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_projects.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_projects.py new file mode 100644 index 000000000..808faf866 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_projects.py @@ -0,0 +1,2591 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.projects import ProjectsClient +from google.cloud.compute_v1.services.projects import pagers +from google.cloud.compute_v1.services.projects import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ProjectsClient._get_default_mtls_endpoint(None) is None + assert ProjectsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ProjectsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ProjectsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ProjectsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ProjectsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ProjectsClient, +]) +def test_projects_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ProjectsRestTransport, "rest"), +]) +def test_projects_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ProjectsClient, +]) +def test_projects_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_projects_client_get_transport_class(): + transport = ProjectsClient.get_transport_class() + available_transports = [ + transports.ProjectsRestTransport, + ] + assert transport in available_transports + + transport = ProjectsClient.get_transport_class("rest") + assert transport == transports.ProjectsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ProjectsClient, transports.ProjectsRestTransport, "rest"), +]) +@mock.patch.object(ProjectsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ProjectsClient)) +def test_projects_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ProjectsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ProjectsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ProjectsClient, transports.ProjectsRestTransport, "rest", "true"), + (ProjectsClient, transports.ProjectsRestTransport, "rest", "false"), +]) +@mock.patch.object(ProjectsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ProjectsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_projects_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ProjectsClient, transports.ProjectsRestTransport, "rest"), +]) +def test_projects_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ProjectsClient, transports.ProjectsRestTransport, "rest"), +]) +def test_projects_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_disable_xpn_host_rest(transport: str = 'rest', request_type=compute.DisableXpnHostProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.disable_xpn_host(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_disable_xpn_host_rest_bad_request(transport: str = 'rest', request_type=compute.DisableXpnHostProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.disable_xpn_host(request) + + +def test_disable_xpn_host_rest_from_dict(): + test_disable_xpn_host_rest(request_type=dict) + + +def test_disable_xpn_host_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.disable_xpn_host(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/disableXpnHost" % client.transport._host, args[1]) + + +def test_disable_xpn_host_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.disable_xpn_host( + compute.DisableXpnHostProjectRequest(), + project='project_value', + ) + + +def test_disable_xpn_resource_rest(transport: str = 'rest', request_type=compute.DisableXpnResourceProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_disable_xpn_resource_request_resource"] = compute.ProjectsDisableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.disable_xpn_resource(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_disable_xpn_resource_rest_bad_request(transport: str = 'rest', request_type=compute.DisableXpnResourceProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_disable_xpn_resource_request_resource"] = compute.ProjectsDisableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.disable_xpn_resource(request) + + +def test_disable_xpn_resource_rest_from_dict(): + test_disable_xpn_resource_rest(request_type=dict) + + +def test_disable_xpn_resource_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + projects_disable_xpn_resource_request_resource=compute.ProjectsDisableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')), + ) + mock_args.update(sample_request) + client.disable_xpn_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/disableXpnResource" % client.transport._host, args[1]) + + +def test_disable_xpn_resource_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.disable_xpn_resource( + compute.DisableXpnResourceProjectRequest(), + project='project_value', + projects_disable_xpn_resource_request_resource=compute.ProjectsDisableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')), + ) + + +def test_enable_xpn_host_rest(transport: str = 'rest', request_type=compute.EnableXpnHostProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.enable_xpn_host(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_enable_xpn_host_rest_bad_request(transport: str = 'rest', request_type=compute.EnableXpnHostProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.enable_xpn_host(request) + + +def test_enable_xpn_host_rest_from_dict(): + test_enable_xpn_host_rest(request_type=dict) + + +def test_enable_xpn_host_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.enable_xpn_host(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/enableXpnHost" % client.transport._host, args[1]) + + +def test_enable_xpn_host_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.enable_xpn_host( + compute.EnableXpnHostProjectRequest(), + project='project_value', + ) + + +def test_enable_xpn_resource_rest(transport: str = 'rest', request_type=compute.EnableXpnResourceProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_enable_xpn_resource_request_resource"] = compute.ProjectsEnableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.enable_xpn_resource(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_enable_xpn_resource_rest_bad_request(transport: str = 'rest', request_type=compute.EnableXpnResourceProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_enable_xpn_resource_request_resource"] = compute.ProjectsEnableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.enable_xpn_resource(request) + + +def test_enable_xpn_resource_rest_from_dict(): + test_enable_xpn_resource_rest(request_type=dict) + + +def test_enable_xpn_resource_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + projects_enable_xpn_resource_request_resource=compute.ProjectsEnableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')), + ) + mock_args.update(sample_request) + client.enable_xpn_resource(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/enableXpnResource" % client.transport._host, args[1]) + + +def test_enable_xpn_resource_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.enable_xpn_resource( + compute.EnableXpnResourceProjectRequest(), + project='project_value', + projects_enable_xpn_resource_request_resource=compute.ProjectsEnableXpnResourceRequest(xpn_resource=compute.XpnResourceId(id='id_value')), + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Project( + creation_timestamp='creation_timestamp_value', + default_network_tier='default_network_tier_value', + default_service_account='default_service_account_value', + description='description_value', + enabled_features=['enabled_features_value'], + id=205, + kind='kind_value', + name='name_value', + self_link='self_link_value', + xpn_project_status='xpn_project_status_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Project.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Project) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.default_network_tier == 'default_network_tier_value' + assert response.default_service_account == 'default_service_account_value' + assert response.description == 'description_value' + assert response.enabled_features == ['enabled_features_value'] + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.xpn_project_status == 'xpn_project_status_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Project() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Project.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetProjectRequest(), + project='project_value', + ) + + +def test_get_xpn_host_rest(transport: str = 'rest', request_type=compute.GetXpnHostProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Project( + creation_timestamp='creation_timestamp_value', + default_network_tier='default_network_tier_value', + default_service_account='default_service_account_value', + description='description_value', + enabled_features=['enabled_features_value'], + id=205, + kind='kind_value', + name='name_value', + self_link='self_link_value', + xpn_project_status='xpn_project_status_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Project.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_xpn_host(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Project) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.default_network_tier == 'default_network_tier_value' + assert response.default_service_account == 'default_service_account_value' + assert response.description == 'description_value' + assert response.enabled_features == ['enabled_features_value'] + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.xpn_project_status == 'xpn_project_status_value' + + +def test_get_xpn_host_rest_bad_request(transport: str = 'rest', request_type=compute.GetXpnHostProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_xpn_host(request) + + +def test_get_xpn_host_rest_from_dict(): + test_get_xpn_host_rest(request_type=dict) + + +def test_get_xpn_host_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Project() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Project.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.get_xpn_host(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/getXpnHost" % client.transport._host, args[1]) + + +def test_get_xpn_host_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_xpn_host( + compute.GetXpnHostProjectRequest(), + project='project_value', + ) + + +def test_get_xpn_resources_rest(transport: str = 'rest', request_type=compute.GetXpnResourcesProjectsRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ProjectsGetXpnResources( + kind='kind_value', + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ProjectsGetXpnResources.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_xpn_resources(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.GetXpnResourcesPager) + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + + +def test_get_xpn_resources_rest_bad_request(transport: str = 'rest', request_type=compute.GetXpnResourcesProjectsRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_xpn_resources(request) + + +def test_get_xpn_resources_rest_from_dict(): + test_get_xpn_resources_rest(request_type=dict) + + +def test_get_xpn_resources_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ProjectsGetXpnResources() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ProjectsGetXpnResources.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.get_xpn_resources(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/getXpnResources" % client.transport._host, args[1]) + + +def test_get_xpn_resources_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_xpn_resources( + compute.GetXpnResourcesProjectsRequest(), + project='project_value', + ) + + +def test_get_xpn_resources_rest_pager(): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ProjectsGetXpnResources( + resources=[ + compute.XpnResourceId(), + compute.XpnResourceId(), + compute.XpnResourceId(), + ], + next_page_token='abc', + ), + compute.ProjectsGetXpnResources( + resources=[], + next_page_token='def', + ), + compute.ProjectsGetXpnResources( + resources=[ + compute.XpnResourceId(), + ], + next_page_token='ghi', + ), + compute.ProjectsGetXpnResources( + resources=[ + compute.XpnResourceId(), + compute.XpnResourceId(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ProjectsGetXpnResources.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.get_xpn_resources(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.XpnResourceId) + for i in results) + + pages = list(client.get_xpn_resources(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_xpn_hosts_rest(transport: str = 'rest', request_type=compute.ListXpnHostsProjectsRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_list_xpn_hosts_request_resource"] = compute.ProjectsListXpnHostsRequest(organization='organization_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.XpnHostList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.XpnHostList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_xpn_hosts(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListXpnHostsPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_xpn_hosts_rest_bad_request(transport: str = 'rest', request_type=compute.ListXpnHostsProjectsRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_list_xpn_hosts_request_resource"] = compute.ProjectsListXpnHostsRequest(organization='organization_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_xpn_hosts(request) + + +def test_list_xpn_hosts_rest_from_dict(): + test_list_xpn_hosts_rest(request_type=dict) + + +def test_list_xpn_hosts_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.XpnHostList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.XpnHostList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + projects_list_xpn_hosts_request_resource=compute.ProjectsListXpnHostsRequest(organization='organization_value'), + ) + mock_args.update(sample_request) + client.list_xpn_hosts(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/listXpnHosts" % client.transport._host, args[1]) + + +def test_list_xpn_hosts_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_xpn_hosts( + compute.ListXpnHostsProjectsRequest(), + project='project_value', + projects_list_xpn_hosts_request_resource=compute.ProjectsListXpnHostsRequest(organization='organization_value'), + ) + + +def test_list_xpn_hosts_rest_pager(): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.XpnHostList( + items=[ + compute.Project(), + compute.Project(), + compute.Project(), + ], + next_page_token='abc', + ), + compute.XpnHostList( + items=[], + next_page_token='def', + ), + compute.XpnHostList( + items=[ + compute.Project(), + ], + next_page_token='ghi', + ), + compute.XpnHostList( + items=[ + compute.Project(), + compute.Project(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.XpnHostList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + sample_request["projects_list_xpn_hosts_request_resource"] = compute.ProjectsListXpnHostsRequest(organization='organization_value') + + pager = client.list_xpn_hosts(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Project) + for i in results) + + pages = list(client.list_xpn_hosts(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_move_disk_rest(transport: str = 'rest', request_type=compute.MoveDiskProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["disk_move_request_resource"] = compute.DiskMoveRequest(destination_zone='destination_zone_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.move_disk(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_move_disk_rest_bad_request(transport: str = 'rest', request_type=compute.MoveDiskProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["disk_move_request_resource"] = compute.DiskMoveRequest(destination_zone='destination_zone_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.move_disk(request) + + +def test_move_disk_rest_from_dict(): + test_move_disk_rest(request_type=dict) + + +def test_move_disk_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + disk_move_request_resource=compute.DiskMoveRequest(destination_zone='destination_zone_value'), + ) + mock_args.update(sample_request) + client.move_disk(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/moveDisk" % client.transport._host, args[1]) + + +def test_move_disk_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.move_disk( + compute.MoveDiskProjectRequest(), + project='project_value', + disk_move_request_resource=compute.DiskMoveRequest(destination_zone='destination_zone_value'), + ) + + +def test_move_instance_rest(transport: str = 'rest', request_type=compute.MoveInstanceProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["instance_move_request_resource"] = compute.InstanceMoveRequest(destination_zone='destination_zone_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.move_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_move_instance_rest_bad_request(transport: str = 'rest', request_type=compute.MoveInstanceProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["instance_move_request_resource"] = compute.InstanceMoveRequest(destination_zone='destination_zone_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.move_instance(request) + + +def test_move_instance_rest_from_dict(): + test_move_instance_rest(request_type=dict) + + +def test_move_instance_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + instance_move_request_resource=compute.InstanceMoveRequest(destination_zone='destination_zone_value'), + ) + mock_args.update(sample_request) + client.move_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/moveInstance" % client.transport._host, args[1]) + + +def test_move_instance_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.move_instance( + compute.MoveInstanceProjectRequest(), + project='project_value', + instance_move_request_resource=compute.InstanceMoveRequest(destination_zone='destination_zone_value'), + ) + + +def test_set_common_instance_metadata_rest(transport: str = 'rest', request_type=compute.SetCommonInstanceMetadataProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["metadata_resource"] = compute.Metadata(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_common_instance_metadata(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_common_instance_metadata_rest_bad_request(transport: str = 'rest', request_type=compute.SetCommonInstanceMetadataProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["metadata_resource"] = compute.Metadata(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_common_instance_metadata(request) + + +def test_set_common_instance_metadata_rest_from_dict(): + test_set_common_instance_metadata_rest(request_type=dict) + + +def test_set_common_instance_metadata_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + metadata_resource=compute.Metadata(fingerprint='fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_common_instance_metadata(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/setCommonInstanceMetadata" % client.transport._host, args[1]) + + +def test_set_common_instance_metadata_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_common_instance_metadata( + compute.SetCommonInstanceMetadataProjectRequest(), + project='project_value', + metadata_resource=compute.Metadata(fingerprint='fingerprint_value'), + ) + + +def test_set_default_network_tier_rest(transport: str = 'rest', request_type=compute.SetDefaultNetworkTierProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_set_default_network_tier_request_resource"] = compute.ProjectsSetDefaultNetworkTierRequest(network_tier='network_tier_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_default_network_tier(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_default_network_tier_rest_bad_request(transport: str = 'rest', request_type=compute.SetDefaultNetworkTierProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["projects_set_default_network_tier_request_resource"] = compute.ProjectsSetDefaultNetworkTierRequest(network_tier='network_tier_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_default_network_tier(request) + + +def test_set_default_network_tier_rest_from_dict(): + test_set_default_network_tier_rest(request_type=dict) + + +def test_set_default_network_tier_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + projects_set_default_network_tier_request_resource=compute.ProjectsSetDefaultNetworkTierRequest(network_tier='network_tier_value'), + ) + mock_args.update(sample_request) + client.set_default_network_tier(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/setDefaultNetworkTier" % client.transport._host, args[1]) + + +def test_set_default_network_tier_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_default_network_tier( + compute.SetDefaultNetworkTierProjectRequest(), + project='project_value', + projects_set_default_network_tier_request_resource=compute.ProjectsSetDefaultNetworkTierRequest(network_tier='network_tier_value'), + ) + + +def test_set_usage_export_bucket_rest(transport: str = 'rest', request_type=compute.SetUsageExportBucketProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["usage_export_location_resource"] = compute.UsageExportLocation(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_usage_export_bucket(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_usage_export_bucket_rest_bad_request(transport: str = 'rest', request_type=compute.SetUsageExportBucketProjectRequest): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["usage_export_location_resource"] = compute.UsageExportLocation(bucket_name='bucket_name_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_usage_export_bucket(request) + + +def test_set_usage_export_bucket_rest_from_dict(): + test_set_usage_export_bucket_rest(request_type=dict) + + +def test_set_usage_export_bucket_rest_flattened(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + usage_export_location_resource=compute.UsageExportLocation(bucket_name='bucket_name_value'), + ) + mock_args.update(sample_request) + client.set_usage_export_bucket(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/setUsageExportBucket" % client.transport._host, args[1]) + + +def test_set_usage_export_bucket_rest_flattened_error(transport: str = 'rest'): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_usage_export_bucket( + compute.SetUsageExportBucketProjectRequest(), + project='project_value', + usage_export_location_resource=compute.UsageExportLocation(bucket_name='bucket_name_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ProjectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ProjectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ProjectsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ProjectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ProjectsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.ProjectsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ProjectsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ProjectsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_projects_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ProjectsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_projects_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.projects.transports.ProjectsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ProjectsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'disable_xpn_host', + 'disable_xpn_resource', + 'enable_xpn_host', + 'enable_xpn_resource', + 'get', + 'get_xpn_host', + 'get_xpn_resources', + 'list_xpn_hosts', + 'move_disk', + 'move_instance', + 'set_common_instance_metadata', + 'set_default_network_tier', + 'set_usage_export_bucket', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_projects_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.projects.transports.ProjectsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ProjectsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_projects_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.projects.transports.ProjectsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ProjectsTransport() + adc.assert_called_once() + + +def test_projects_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ProjectsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_projects_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ProjectsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_projects_host_no_port(): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_projects_host_with_port(): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ProjectsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ProjectsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ProjectsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ProjectsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ProjectsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ProjectsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ProjectsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ProjectsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ProjectsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ProjectsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ProjectsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ProjectsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ProjectsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ProjectsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ProjectsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ProjectsTransport, '_prep_wrapped_messages') as prep: + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ProjectsTransport, '_prep_wrapped_messages') as prep: + transport_class = ProjectsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ProjectsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_public_advertised_prefixes.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_public_advertised_prefixes.py new file mode 100644 index 000000000..e996935ea --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_public_advertised_prefixes.py @@ -0,0 +1,1393 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.public_advertised_prefixes import PublicAdvertisedPrefixesClient +from google.cloud.compute_v1.services.public_advertised_prefixes import pagers +from google.cloud.compute_v1.services.public_advertised_prefixes import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PublicAdvertisedPrefixesClient._get_default_mtls_endpoint(None) is None + assert PublicAdvertisedPrefixesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PublicAdvertisedPrefixesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PublicAdvertisedPrefixesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PublicAdvertisedPrefixesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PublicAdvertisedPrefixesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + PublicAdvertisedPrefixesClient, +]) +def test_public_advertised_prefixes_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PublicAdvertisedPrefixesRestTransport, "rest"), +]) +def test_public_advertised_prefixes_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = 
service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + PublicAdvertisedPrefixesClient, +]) +def test_public_advertised_prefixes_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_public_advertised_prefixes_client_get_transport_class(): + transport = PublicAdvertisedPrefixesClient.get_transport_class() + available_transports = [ + transports.PublicAdvertisedPrefixesRestTransport, + ] + assert transport in available_transports + + transport = PublicAdvertisedPrefixesClient.get_transport_class("rest") + assert transport == transports.PublicAdvertisedPrefixesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PublicAdvertisedPrefixesClient, transports.PublicAdvertisedPrefixesRestTransport, "rest"), +]) +@mock.patch.object(PublicAdvertisedPrefixesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PublicAdvertisedPrefixesClient)) +def test_public_advertised_prefixes_client_client_options(client_class, transport_class, 
transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(PublicAdvertisedPrefixesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PublicAdvertisedPrefixesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PublicAdvertisedPrefixesClient, transports.PublicAdvertisedPrefixesRestTransport, "rest", "true"), + (PublicAdvertisedPrefixesClient, transports.PublicAdvertisedPrefixesRestTransport, "rest", "false"), +]) +@mock.patch.object(PublicAdvertisedPrefixesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PublicAdvertisedPrefixesClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_public_advertised_prefixes_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PublicAdvertisedPrefixesClient, transports.PublicAdvertisedPrefixesRestTransport, "rest"), +]) +def test_public_advertised_prefixes_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PublicAdvertisedPrefixesClient, transports.PublicAdvertisedPrefixesRestTransport, "rest"), +]) +def test_public_advertised_prefixes_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeletePublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_advertised_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeletePublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_advertised_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "public_advertised_prefix": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_advertised_prefix='public_advertised_prefix_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicAdvertisedPrefixes/{public_advertised_prefix}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeletePublicAdvertisedPrefixeRequest(), + project='project_value', + public_advertised_prefix='public_advertised_prefix_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetPublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_advertised_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PublicAdvertisedPrefix( + creation_timestamp='creation_timestamp_value', + description='description_value', + dns_verification_ip='dns_verification_ip_value', + fingerprint='fingerprint_value', + id=205, + ip_cidr_range='ip_cidr_range_value', + kind='kind_value', + name='name_value', + self_link='self_link_value', + shared_secret='shared_secret_value', + status='status_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicAdvertisedPrefix.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.PublicAdvertisedPrefix) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.dns_verification_ip == 'dns_verification_ip_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.ip_cidr_range == 'ip_cidr_range_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.shared_secret == 'shared_secret_value' + assert response.status == 'status_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetPublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_advertised_prefix": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PublicAdvertisedPrefix() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicAdvertisedPrefix.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "public_advertised_prefix": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_advertised_prefix='public_advertised_prefix_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicAdvertisedPrefixes/{public_advertised_prefix}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetPublicAdvertisedPrefixeRequest(), + project='project_value', + public_advertised_prefix='public_advertised_prefix_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertPublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["public_advertised_prefix_resource"] = compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertPublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["public_advertised_prefix_resource"] = compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_advertised_prefix_resource=compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicAdvertisedPrefixes" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertPublicAdvertisedPrefixeRequest(), + project='project_value', + public_advertised_prefix_resource=compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListPublicAdvertisedPrefixesRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PublicAdvertisedPrefixList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicAdvertisedPrefixList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListPublicAdvertisedPrefixesRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PublicAdvertisedPrefixList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicAdvertisedPrefixList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicAdvertisedPrefixes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListPublicAdvertisedPrefixesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.PublicAdvertisedPrefixList( + items=[ + compute.PublicAdvertisedPrefix(), + compute.PublicAdvertisedPrefix(), + compute.PublicAdvertisedPrefix(), + ], + next_page_token='abc', + ), + compute.PublicAdvertisedPrefixList( + items=[], + next_page_token='def', + ), + compute.PublicAdvertisedPrefixList( + items=[ + compute.PublicAdvertisedPrefix(), + ], + next_page_token='ghi', + ), + compute.PublicAdvertisedPrefixList( + items=[ + compute.PublicAdvertisedPrefix(), + compute.PublicAdvertisedPrefix(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.PublicAdvertisedPrefixList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.PublicAdvertisedPrefix) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchPublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_advertised_prefix": "sample2"} + request_init["public_advertised_prefix_resource"] = compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value') + request = 
request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchPublicAdvertisedPrefixeRequest): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "public_advertised_prefix": "sample2"} + request_init["public_advertised_prefix_resource"] = compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "public_advertised_prefix": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + public_advertised_prefix='public_advertised_prefix_value', + public_advertised_prefix_resource=compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/publicAdvertisedPrefixes/{public_advertised_prefix}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchPublicAdvertisedPrefixeRequest(), + project='project_value', + public_advertised_prefix='public_advertised_prefix_value', + public_advertised_prefix_resource=compute.PublicAdvertisedPrefix(creation_timestamp='creation_timestamp_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PublicAdvertisedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PublicAdvertisedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PublicAdvertisedPrefixesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.PublicAdvertisedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PublicAdvertisedPrefixesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PublicAdvertisedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PublicAdvertisedPrefixesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.PublicAdvertisedPrefixesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_public_advertised_prefixes_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PublicAdvertisedPrefixesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_public_advertised_prefixes_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.public_advertised_prefixes.transports.PublicAdvertisedPrefixesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PublicAdvertisedPrefixesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_public_advertised_prefixes_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.public_advertised_prefixes.transports.PublicAdvertisedPrefixesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PublicAdvertisedPrefixesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_public_advertised_prefixes_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.public_advertised_prefixes.transports.PublicAdvertisedPrefixesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PublicAdvertisedPrefixesTransport() + adc.assert_called_once() + + +def test_public_advertised_prefixes_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PublicAdvertisedPrefixesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_public_advertised_prefixes_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.PublicAdvertisedPrefixesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_public_advertised_prefixes_host_no_port(): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_public_advertised_prefixes_host_with_port(): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PublicAdvertisedPrefixesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = PublicAdvertisedPrefixesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PublicAdvertisedPrefixesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = PublicAdvertisedPrefixesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = PublicAdvertisedPrefixesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PublicAdvertisedPrefixesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PublicAdvertisedPrefixesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = PublicAdvertisedPrefixesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PublicAdvertisedPrefixesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = PublicAdvertisedPrefixesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = PublicAdvertisedPrefixesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PublicAdvertisedPrefixesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PublicAdvertisedPrefixesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = PublicAdvertisedPrefixesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = PublicAdvertisedPrefixesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PublicAdvertisedPrefixesTransport, '_prep_wrapped_messages') as prep: + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PublicAdvertisedPrefixesTransport, '_prep_wrapped_messages') as prep: + transport_class = PublicAdvertisedPrefixesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = PublicAdvertisedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = PublicAdvertisedPrefixesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_public_delegated_prefixes.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_public_delegated_prefixes.py new file mode 100644 index 000000000..e5da75108 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_public_delegated_prefixes.py @@ -0,0 +1,1590 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.public_delegated_prefixes import PublicDelegatedPrefixesClient +from google.cloud.compute_v1.services.public_delegated_prefixes import pagers +from google.cloud.compute_v1.services.public_delegated_prefixes import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert PublicDelegatedPrefixesClient._get_default_mtls_endpoint(None) is None + assert PublicDelegatedPrefixesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert PublicDelegatedPrefixesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert PublicDelegatedPrefixesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert PublicDelegatedPrefixesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert PublicDelegatedPrefixesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + PublicDelegatedPrefixesClient, +]) +def test_public_delegated_prefixes_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.PublicDelegatedPrefixesRestTransport, "rest"), +]) +def test_public_delegated_prefixes_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = 
service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + PublicDelegatedPrefixesClient, +]) +def test_public_delegated_prefixes_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_public_delegated_prefixes_client_get_transport_class(): + transport = PublicDelegatedPrefixesClient.get_transport_class() + available_transports = [ + transports.PublicDelegatedPrefixesRestTransport, + ] + assert transport in available_transports + + transport = PublicDelegatedPrefixesClient.get_transport_class("rest") + assert transport == transports.PublicDelegatedPrefixesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PublicDelegatedPrefixesClient, transports.PublicDelegatedPrefixesRestTransport, "rest"), +]) +@mock.patch.object(PublicDelegatedPrefixesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PublicDelegatedPrefixesClient)) +def test_public_delegated_prefixes_client_client_options(client_class, transport_class, transport_name): + # Check 
that if channel is provided we won't create a new one. + with mock.patch.object(PublicDelegatedPrefixesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(PublicDelegatedPrefixesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (PublicDelegatedPrefixesClient, transports.PublicDelegatedPrefixesRestTransport, "rest", "true"), + (PublicDelegatedPrefixesClient, transports.PublicDelegatedPrefixesRestTransport, "rest", "false"), +]) +@mock.patch.object(PublicDelegatedPrefixesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(PublicDelegatedPrefixesClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_public_delegated_prefixes_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PublicDelegatedPrefixesClient, transports.PublicDelegatedPrefixesRestTransport, "rest"), +]) +def test_public_delegated_prefixes_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (PublicDelegatedPrefixesClient, transports.PublicDelegatedPrefixesRestTransport, "rest"), +]) +def test_public_delegated_prefixes_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListPublicDelegatedPrefixesRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PublicDelegatedPrefixAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefixAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListPublicDelegatedPrefixesRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PublicDelegatedPrefixAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefixAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/publicDelegatedPrefixes" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListPublicDelegatedPrefixesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.PublicDelegatedPrefixAggregatedList( + items={ + 'a':compute.PublicDelegatedPrefixesScopedList(), + 'b':compute.PublicDelegatedPrefixesScopedList(), + 'c':compute.PublicDelegatedPrefixesScopedList(), + }, + next_page_token='abc', + ), + compute.PublicDelegatedPrefixAggregatedList( + items={}, + next_page_token='def', + ), + compute.PublicDelegatedPrefixAggregatedList( + items={ + 'g':compute.PublicDelegatedPrefixesScopedList(), + }, + next_page_token='ghi', + ), + compute.PublicDelegatedPrefixAggregatedList( + items={ + 'h':compute.PublicDelegatedPrefixesScopedList(), + 'i':compute.PublicDelegatedPrefixesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.PublicDelegatedPrefixAggregatedList.to_json(x) for x in response) + return_values = 
tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.PublicDelegatedPrefixesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.PublicDelegatedPrefixesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.PublicDelegatedPrefixesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeletePublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeletePublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes/{public_delegated_prefix}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeletePublicDelegatedPrefixeRequest(), + project='project_value', + region='region_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetPublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PublicDelegatedPrefix( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + ip_cidr_range='ip_cidr_range_value', + is_live_migration=True, + kind='kind_value', + name='name_value', + parent_prefix='parent_prefix_value', + region='region_value', + self_link='self_link_value', + status='status_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefix.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.PublicDelegatedPrefix) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.ip_cidr_range == 'ip_cidr_range_value' + assert response.is_live_migration is True + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.parent_prefix == 'parent_prefix_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetPublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PublicDelegatedPrefix() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefix.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes/{public_delegated_prefix}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetPublicDelegatedPrefixeRequest(), + project='project_value', + region='region_value', + public_delegated_prefix='public_delegated_prefix_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertPublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertPublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertPublicDelegatedPrefixeRequest(), + project='project_value', + region='region_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListPublicDelegatedPrefixesRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.PublicDelegatedPrefixList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefixList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListPublicDelegatedPrefixesRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.PublicDelegatedPrefixList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.PublicDelegatedPrefixList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListPublicDelegatedPrefixesRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.PublicDelegatedPrefixList( + items=[ + compute.PublicDelegatedPrefix(), + compute.PublicDelegatedPrefix(), + compute.PublicDelegatedPrefix(), + ], + next_page_token='abc', + ), + compute.PublicDelegatedPrefixList( + items=[], + next_page_token='def', + ), + compute.PublicDelegatedPrefixList( + items=[ + compute.PublicDelegatedPrefix(), + ], + next_page_token='ghi', + ), + compute.PublicDelegatedPrefixList( + items=[ + compute.PublicDelegatedPrefix(), + compute.PublicDelegatedPrefix(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.PublicDelegatedPrefixList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.PublicDelegatedPrefix) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchPublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + 
request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchPublicDelegatedPrefixeRequest): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + request_init["public_delegated_prefix_resource"] = compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "public_delegated_prefix": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + public_delegated_prefix='public_delegated_prefix_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/publicDelegatedPrefixes/{public_delegated_prefix}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchPublicDelegatedPrefixeRequest(), + project='project_value', + region='region_value', + public_delegated_prefix='public_delegated_prefix_value', + public_delegated_prefix_resource=compute.PublicDelegatedPrefix(creation_timestamp='creation_timestamp_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.PublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.PublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PublicDelegatedPrefixesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.PublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = PublicDelegatedPrefixesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.PublicDelegatedPrefixesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = PublicDelegatedPrefixesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.PublicDelegatedPrefixesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_public_delegated_prefixes_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.PublicDelegatedPrefixesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_public_delegated_prefixes_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.public_delegated_prefixes.transports.PublicDelegatedPrefixesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.PublicDelegatedPrefixesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_public_delegated_prefixes_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.public_delegated_prefixes.transports.PublicDelegatedPrefixesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PublicDelegatedPrefixesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_public_delegated_prefixes_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.public_delegated_prefixes.transports.PublicDelegatedPrefixesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.PublicDelegatedPrefixesTransport() + adc.assert_called_once() + + +def test_public_delegated_prefixes_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + PublicDelegatedPrefixesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_public_delegated_prefixes_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.PublicDelegatedPrefixesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_public_delegated_prefixes_host_no_port(): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_public_delegated_prefixes_host_with_port(): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = PublicDelegatedPrefixesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = PublicDelegatedPrefixesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PublicDelegatedPrefixesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = PublicDelegatedPrefixesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = PublicDelegatedPrefixesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = PublicDelegatedPrefixesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = PublicDelegatedPrefixesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = PublicDelegatedPrefixesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = PublicDelegatedPrefixesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = PublicDelegatedPrefixesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = PublicDelegatedPrefixesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = PublicDelegatedPrefixesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = PublicDelegatedPrefixesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = PublicDelegatedPrefixesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = PublicDelegatedPrefixesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.PublicDelegatedPrefixesTransport, '_prep_wrapped_messages') as prep: + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.PublicDelegatedPrefixesTransport, '_prep_wrapped_messages') as prep: + transport_class = PublicDelegatedPrefixesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = PublicDelegatedPrefixesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = PublicDelegatedPrefixesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_autoscalers.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_autoscalers.py new file mode 100644 index 000000000..0ec2cf022 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_autoscalers.py @@ -0,0 +1,1555 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_autoscalers import RegionAutoscalersClient +from google.cloud.compute_v1.services.region_autoscalers import pagers +from google.cloud.compute_v1.services.region_autoscalers import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionAutoscalersClient._get_default_mtls_endpoint(None) is None + assert RegionAutoscalersClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionAutoscalersClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionAutoscalersClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionAutoscalersClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionAutoscalersClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionAutoscalersClient, +]) +def test_region_autoscalers_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionAutoscalersRestTransport, "rest"), +]) +def test_region_autoscalers_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionAutoscalersClient, +]) +def test_region_autoscalers_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_autoscalers_client_get_transport_class(): + transport = RegionAutoscalersClient.get_transport_class() + available_transports = [ + transports.RegionAutoscalersRestTransport, + ] + assert transport in available_transports + + transport = RegionAutoscalersClient.get_transport_class("rest") + assert transport == transports.RegionAutoscalersRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest"), +]) +@mock.patch.object(RegionAutoscalersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionAutoscalersClient)) +def test_region_autoscalers_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionAutoscalersClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionAutoscalersClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest", "true"), + (RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionAutoscalersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionAutoscalersClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_autoscalers_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest"), +]) +def test_region_autoscalers_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionAutoscalersClient, transports.RegionAutoscalersRestTransport, "rest"), +]) +def test_region_autoscalers_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "autoscaler": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + autoscaler='autoscaler_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/autoscalers/{autoscaler}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionAutoscalerRequest(), + project='project_value', + region='region_value', + autoscaler='autoscaler_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Autoscaler( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + recommended_size=1693, + region='region_value', + self_link='self_link_value', + status='status_value', + target='target_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Autoscaler.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Autoscaler) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.recommended_size == 1693 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.target == 'target_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "autoscaler": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Autoscaler() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Autoscaler.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "autoscaler": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + autoscaler='autoscaler_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/autoscalers/{autoscaler}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetRegionAutoscalerRequest(), + project='project_value', + region='region_value', + autoscaler='autoscaler_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/autoscalers" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionAutoscalerRequest(), + project='project_value', + region='region_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionAutoscalersRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionAutoscalerList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionAutoscalerList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionAutoscalersRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.RegionAutoscalerList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionAutoscalerList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/autoscalers" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionAutoscalersRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionAutoscalerList( + items=[ + compute.Autoscaler(), + compute.Autoscaler(), + compute.Autoscaler(), + ], + next_page_token='abc', + ), + compute.RegionAutoscalerList( + items=[], + next_page_token='def', + ), + compute.RegionAutoscalerList( + items=[ + compute.Autoscaler(), + ], + next_page_token='ghi', + ), + compute.RegionAutoscalerList( + items=[ + compute.Autoscaler(), + compute.Autoscaler(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionAutoscalerList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Autoscaler) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/autoscalers" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchRegionAutoscalerRequest(), + project='project_value', + region='region_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateRegionAutoscalerRequest): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["autoscaler_resource"] = compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/autoscalers" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateRegionAutoscalerRequest(), + project='project_value', + region='region_value', + autoscaler_resource=compute.Autoscaler(autoscaling_policy=compute.AutoscalingPolicy(cool_down_period_sec=2112)), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionAutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionAutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionAutoscalersClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionAutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionAutoscalersClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.RegionAutoscalersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionAutoscalersClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionAutoscalersRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_autoscalers_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionAutoscalersTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_autoscalers_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_autoscalers.transports.RegionAutoscalersTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionAutoscalersTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'update', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_autoscalers_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_autoscalers.transports.RegionAutoscalersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionAutoscalersTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_autoscalers_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_autoscalers.transports.RegionAutoscalersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionAutoscalersTransport() + adc.assert_called_once() + + +def test_region_autoscalers_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionAutoscalersClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_autoscalers_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionAutoscalersRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_autoscalers_host_no_port(): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_autoscalers_host_with_port(): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionAutoscalersClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionAutoscalersClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionAutoscalersClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionAutoscalersClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionAutoscalersClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionAutoscalersClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionAutoscalersClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionAutoscalersClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionAutoscalersClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionAutoscalersClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionAutoscalersClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionAutoscalersClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionAutoscalersClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionAutoscalersClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionAutoscalersClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionAutoscalersTransport, '_prep_wrapped_messages') as prep: + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionAutoscalersTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionAutoscalersClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionAutoscalersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport 
+ ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_backend_services.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_backend_services.py new file mode 100644 index 000000000..d80d2df7a --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_backend_services.py @@ -0,0 +1,1695 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_backend_services import RegionBackendServicesClient +from google.cloud.compute_v1.services.region_backend_services import pagers +from google.cloud.compute_v1.services.region_backend_services import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionBackendServicesClient._get_default_mtls_endpoint(None) is None + assert RegionBackendServicesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionBackendServicesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionBackendServicesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionBackendServicesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionBackendServicesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionBackendServicesClient, +]) +def test_region_backend_services_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionBackendServicesRestTransport, "rest"), +]) +def test_region_backend_services_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, 
None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionBackendServicesClient, +]) +def test_region_backend_services_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_backend_services_client_get_transport_class(): + transport = RegionBackendServicesClient.get_transport_class() + available_transports = [ + transports.RegionBackendServicesRestTransport, + ] + assert transport in available_transports + + transport = RegionBackendServicesClient.get_transport_class("rest") + assert transport == transports.RegionBackendServicesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionBackendServicesClient, transports.RegionBackendServicesRestTransport, "rest"), +]) +@mock.patch.object(RegionBackendServicesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionBackendServicesClient)) +def test_region_backend_services_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionBackendServicesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionBackendServicesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionBackendServicesClient, transports.RegionBackendServicesRestTransport, "rest", "true"), + (RegionBackendServicesClient, transports.RegionBackendServicesRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionBackendServicesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionBackendServicesClient)) 
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_backend_services_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.

    # Check the case client_cert_source is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
        with mock.patch.object(transport_class, '__init__') as patched:
            patched.return_value = None
            client = client_class(transport=transport_name, client_options=options)

            if use_client_cert_env == "false":
                expected_client_cert_source = None
                expected_host = client.DEFAULT_ENDPOINT
            else:
                expected_client_cert_source = client_cert_source_callback
                expected_host = client.DEFAULT_MTLS_ENDPOINT

            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=expected_host,
                scopes=None,
                client_cert_source_for_mtls=expected_client_cert_source,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
            )

    # Check the case ADC client cert is provided. Whether client cert is used depends on
    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
                    # NOTE(review): `client.DEFAULT_ENDPOINT` here reads the
                    # `client` bound in the previous with-block (function scope),
                    # before the new client is created below — generated quirk,
                    # works because DEFAULT_ENDPOINT is a class attribute.
                    if use_client_cert_env == "false":
                        expected_host = client.DEFAULT_ENDPOINT
                        expected_client_cert_source = None
                    else:
                        expected_host = client.DEFAULT_MTLS_ENDPOINT
                        expected_client_cert_source = client_cert_source_callback

                    patched.return_value = None
                    client = client_class(transport=transport_name)
                    patched.assert_called_once_with(
                        credentials=None,
                        credentials_file=None,
                        host=expected_host,
                        scopes=None,
                        client_cert_source_for_mtls=expected_client_cert_source,
                        quota_project_id=None,
                        client_info=transports.base.DEFAULT_CLIENT_INFO,
                        always_use_jwt_access=True,
                    )

    # Check the case client_cert_source and ADC client cert are not provided.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
        with mock.patch.object(transport_class, '__init__') as patched:
            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
                patched.return_value = None
                client = client_class(transport=transport_name)
                patched.assert_called_once_with(
                    credentials=None,
                    credentials_file=None,
                    host=client.DEFAULT_ENDPOINT,
                    scopes=None,
                    client_cert_source_for_mtls=None,
                    quota_project_id=None,
                    client_info=transports.base.DEFAULT_CLIENT_INFO,
                    always_use_jwt_access=True,
                )


@pytest.mark.parametrize("client_class,transport_class,transport_name", [
    (RegionBackendServicesClient, transports.RegionBackendServicesRestTransport, "rest"),
])
def test_region_backend_services_client_client_options_scopes(client_class, transport_class, transport_name):
    # Check the case scopes are provided.
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionBackendServicesClient, transports.RegionBackendServicesRestTransport, "rest"), +]) +def test_region_backend_services_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionBackendServiceRequest): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
        # Fully-populated Operation so every scalar field round-trips
        # through JSON serialization below.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.delete(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    # A 400 from the mocked session must surface as core_exceptions.BadRequest.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.delete(request)


def test_delete_rest_from_dict():
    # Re-runs test_delete_rest with a plain dict request to cover dict coercion.
    test_delete_rest(request_type=dict)


def test_delete_rest_flattened(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            backend_service='backend_service_value',
        )
        mock_args.update(sample_request)
        client.delete(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}" % client.transport._host, args[1])


def test_delete_rest_flattened_error(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete(
            compute.DeleteRegionBackendServiceRequest(),
            project='project_value',
            region='region_value',
            backend_service='backend_service_value',
        )


def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        # Fully-populated BackendService so every scalar field round-trips
        # through JSON serialization below.
        return_value = compute.BackendService(
            affinity_cookie_ttl_sec=2432,
            creation_timestamp='creation_timestamp_value',
            custom_request_headers=['custom_request_headers_value'],
            custom_response_headers=['custom_response_headers_value'],
            description='description_value',
            enable_c_d_n=True,
            fingerprint='fingerprint_value',
            health_checks=['health_checks_value'],
            id=205,
            kind='kind_value',
            load_balancing_scheme='load_balancing_scheme_value',
            locality_lb_policy='locality_lb_policy_value',
            name='name_value',
            network='network_value',
            port=453,
            port_name='port_name_value',
            protocol='protocol_value',
            region='region_value',
            security_policy='security_policy_value',
            self_link='self_link_value',
            session_affinity='session_affinity_value',
            timeout_sec=1185,
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.BackendService.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.BackendService)
    assert response.affinity_cookie_ttl_sec == 2432
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.custom_request_headers == ['custom_request_headers_value']
    assert response.custom_response_headers == ['custom_response_headers_value']
    assert response.description == 'description_value'
    assert response.enable_c_d_n is True
    assert response.fingerprint == 'fingerprint_value'
    assert response.health_checks == ['health_checks_value']
    assert response.id == 205
    assert response.kind == 'kind_value'
    assert response.load_balancing_scheme == 'load_balancing_scheme_value'
    assert response.locality_lb_policy == 'locality_lb_policy_value'
    assert response.name == 'name_value'
    assert response.network == 'network_value'
    assert response.port == 453
    assert response.port_name == 'port_name_value'
    assert response.protocol == 'protocol_value'
    assert response.region == 'region_value'
    assert response.security_policy == 'security_policy_value'
    assert response.self_link == 'self_link_value'
    assert response.session_affinity == 'session_affinity_value'
    assert response.timeout_sec == 1185


def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    # A 400 from the mocked session must surface as core_exceptions.BadRequest.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)


def test_get_rest_from_dict():
    # Re-runs test_get_rest with a plain dict request to cover dict coercion.
    test_get_rest(request_type=dict)


def test_get_rest_flattened(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.BackendService()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.BackendService.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            backend_service='backend_service_value',
        )
        mock_args.update(sample_request)
        client.get(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}" % client.transport._host, args[1])


def test_get_rest_flattened_error(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get(
            compute.GetRegionBackendServiceRequest(),
            project='project_value',
            region='region_value',
            backend_service='backend_service_value',
        )


def test_get_health_rest(transport: str = 'rest', request_type=compute.GetHealthRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}
    # Body message for the POST .../getHealth call.
    request_init["resource_group_reference_resource"] = compute.ResourceGroupReference(group='group_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.BackendServiceGroupHealth(
            kind='kind_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.BackendServiceGroupHealth.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get_health(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.BackendServiceGroupHealth)
    assert response.kind == 'kind_value'


def test_get_health_rest_bad_request(transport: str = 'rest', request_type=compute.GetHealthRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}
    request_init["resource_group_reference_resource"] = compute.ResourceGroupReference(group='group_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get_health(request)


def test_get_health_rest_from_dict():
    # Re-runs test_get_health_rest with a plain dict request to cover dict coercion.
    test_get_health_rest(request_type=dict)


def test_get_health_rest_flattened(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.BackendServiceGroupHealth()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.BackendServiceGroupHealth.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            backend_service='backend_service_value',
            resource_group_reference_resource=compute.ResourceGroupReference(group='group_value'),
        )
        mock_args.update(sample_request)
        client.get_health(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}/getHealth" % client.transport._host, args[1])


def test_get_health_rest_flattened_error(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_health(
            compute.GetHealthRegionBackendServiceRequest(),
            project='project_value',
            region='region_value',
            backend_service='backend_service_value',
            resource_group_reference_resource=compute.ResourceGroupReference(group='group_value'),
        )


def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    # Body message for the POST insert call.
    request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.insert(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    # A 400 from the mocked session must surface as core_exceptions.BadRequest.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.insert(request)


def test_insert_rest_from_dict():
    # Re-runs test_insert_rest with a plain dict request to cover dict coercion.
    test_insert_rest(request_type=dict)


def test_insert_rest_flattened(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432),
        )
        mock_args.update(sample_request)
        client.insert(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/backendServices" % client.transport._host, args[1])


def test_insert_rest_flattened_error(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.insert(
            compute.InsertRegionBackendServiceRequest(),
            project='project_value',
            region='region_value',
            backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432),
        )


def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionBackendServicesRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.BackendServiceList(
            id='id_value',
            kind='kind_value',
            next_page_token='next_page_token_value',
            self_link='self_link_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.BackendServiceList.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.list(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, pagers.ListPager)
    # NOTE(review): the attribute asserts below presumably rely on the pager
    # delegating unknown attributes to the wrapped BackendServiceList response
    # (pagers.ListPager.__getattr__) — confirm against the generated pagers module.
    assert response.id == 'id_value'
    assert response.kind == 'kind_value'
    assert response.next_page_token == 'next_page_token_value'
    assert response.self_link == 'self_link_value'


def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionBackendServicesRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.list(request)


def test_list_rest_from_dict():
    # Re-runs test_list_rest with a plain dict request to cover dict coercion.
    test_list_rest(request_type=dict)


def test_list_rest_flattened(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.BackendServiceList()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.BackendServiceList.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
        )
        mock_args.update(sample_request)
        client.list(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/backendServices" % client.transport._host, args[1])


def test_list_rest_flattened_error(transport: str = 'rest'):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list(
            compute.ListRegionBackendServicesRequest(),
            project='project_value',
            region='region_value',
        )


def test_list_rest_pager():
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        #with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        # Page layout: 3 + 0 + 1 + 2 items across four pages; the empty page
        # exercises pagination past a page with no items.
        response = (
            compute.BackendServiceList(
                items=[
                    compute.BackendService(),
                    compute.BackendService(),
                    compute.BackendService(),
                ],
                next_page_token='abc',
            ),
            compute.BackendServiceList(
                items=[],
                next_page_token='def',
            ),
            compute.BackendServiceList(
                items=[
                    compute.BackendService(),
                ],
                next_page_token='ghi',
            ),
            compute.BackendServiceList(
                items=[
                    compute.BackendService(),
                ],
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.BackendServiceList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1", "region": "sample2"}

        pager = client.list(request=sample_request)

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.BackendService)
                   for i in results)

        pages = list(client.list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_patch_rest(transport: str = 'rest', request_type=compute.PatchRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}
    # Body message for the PATCH call.
    request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.patch(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRegionBackendServiceRequest):
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}
    request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "backend_service": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + backend_service='backend_service_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchRegionBackendServiceRequest(), + project='project_value', + region='region_value', + backend_service='backend_service_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateRegionBackendServiceRequest): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateRegionBackendServiceRequest): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "backend_service": "sample3"} + request_init["backend_service_resource"] = compute.BackendService(affinity_cookie_ttl_sec=2432) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
    # Patch the HTTP session and expect the 400 status to surface as a
    # google.api_core BadRequest exception from client.update().
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.update(request)


def test_update_rest_from_dict():
    """Verify update() also accepts a plain dict as the request (re-runs the main test)."""
    test_update_rest(request_type=dict)


def test_update_rest_flattened(transport: str = 'rest'):
    """Verify update() accepts flattened keyword arguments and transcodes them
    onto the expected backendServices HTTP path.
    """
    client = RegionBackendServicesClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "region": "sample2", "backend_service": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            region='region_value',
            backend_service='backend_service_value',
            backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432),
        )
        mock_args.update(sample_request)
        client.update(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/backendServices/{backend_service}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateRegionBackendServiceRequest(), + project='project_value', + region='region_value', + backend_service='backend_service_value', + backend_service_resource=compute.BackendService(affinity_cookie_ttl_sec=2432), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionBackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionBackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionBackendServicesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionBackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionBackendServicesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.RegionBackendServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionBackendServicesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionBackendServicesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_backend_services_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionBackendServicesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_backend_services_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_backend_services.transports.RegionBackendServicesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionBackendServicesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
    # Every RPC surfaced by the base transport must raise NotImplementedError
    # until a concrete transport (e.g. REST) overrides it.
    methods = (
        'delete',
        'get',
        'get_health',
        'insert',
        'list',
        'patch',
        'update',
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())

    with pytest.raises(NotImplementedError):
        transport.close()


def test_region_backend_services_base_transport_with_credentials_file():
    # Instantiate the base transport with a credentials file
    # _prep_wrapped_messages is patched out so no real wrapping/IO happens;
    # the assertion pins the exact scopes forwarded to the auth loader.
    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_backend_services.transports.RegionBackendServicesTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.RegionBackendServicesTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with("credentials.json",
            scopes=None,
            default_scopes=(
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/cloud-platform',
),
            quota_project_id="octopus",
        )


def test_region_backend_services_base_transport_with_adc():
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_backend_services.transports.RegionBackendServicesTransport._prep_wrapped_messages') as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.RegionBackendServicesTransport()
        adc.assert_called_once()


def test_region_backend_services_auth_adc():
    # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionBackendServicesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_backend_services_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionBackendServicesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_backend_services_host_no_port(): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_backend_services_host_with_port(): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionBackendServicesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionBackendServicesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
    actual = RegionBackendServicesClient.parse_common_billing_account_path(path)
    assert expected == actual

def test_common_folder_path():
    # Folder resource names follow "folders/{folder}".
    folder = "whelk"
    expected = "folders/{folder}".format(folder=folder, )
    actual = RegionBackendServicesClient.common_folder_path(folder)
    assert expected == actual


def test_parse_common_folder_path():
    # Round-trip: build a path from components, then parse it back.
    expected = {
        "folder": "octopus",
    }
    path = RegionBackendServicesClient.common_folder_path(**expected)

    # Check that the path construction is reversible.
    actual = RegionBackendServicesClient.parse_common_folder_path(path)
    assert expected == actual

def test_common_organization_path():
    # Organization resource names follow "organizations/{organization}".
    organization = "oyster"
    expected = "organizations/{organization}".format(organization=organization, )
    actual = RegionBackendServicesClient.common_organization_path(organization)
    assert expected == actual


def test_parse_common_organization_path():
    # Round-trip: build a path from components, then parse it back.
    expected = {
        "organization": "nudibranch",
    }
    path = RegionBackendServicesClient.common_organization_path(**expected)

    # Check that the path construction is reversible.
    actual = RegionBackendServicesClient.parse_common_organization_path(path)
    assert expected == actual

def test_common_project_path():
    # Project resource names follow "projects/{project}".
    project = "cuttlefish"
    expected = "projects/{project}".format(project=project, )
    actual = RegionBackendServicesClient.common_project_path(project)
    assert expected == actual


def test_parse_common_project_path():
    # Round-trip: build a path from components, then parse it back.
    expected = {
        "project": "mussel",
    }
    path = RegionBackendServicesClient.common_project_path(**expected)

    # Check that the path construction is reversible.
+ actual = RegionBackendServicesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionBackendServicesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionBackendServicesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionBackendServicesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionBackendServicesTransport, '_prep_wrapped_messages') as prep: + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionBackendServicesTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionBackendServicesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionBackendServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionBackendServicesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_commitments.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_commitments.py new file mode 100644 index 000000000..259a685c7 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_commitments.py @@ -0,0 +1,1286 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_commitments import RegionCommitmentsClient +from google.cloud.compute_v1.services.region_commitments import pagers +from google.cloud.compute_v1.services.region_commitments import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionCommitmentsClient._get_default_mtls_endpoint(None) is None + assert RegionCommitmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionCommitmentsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionCommitmentsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionCommitmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionCommitmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionCommitmentsClient, +]) +def test_region_commitments_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionCommitmentsRestTransport, "rest"), +]) +def test_region_commitments_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionCommitmentsClient, +]) +def test_region_commitments_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_commitments_client_get_transport_class(): + transport = RegionCommitmentsClient.get_transport_class() + available_transports = [ + transports.RegionCommitmentsRestTransport, + ] + assert transport in available_transports + + transport = RegionCommitmentsClient.get_transport_class("rest") + assert transport == transports.RegionCommitmentsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionCommitmentsClient, transports.RegionCommitmentsRestTransport, "rest"), +]) +@mock.patch.object(RegionCommitmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionCommitmentsClient)) +def test_region_commitments_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionCommitmentsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionCommitmentsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionCommitmentsClient, transports.RegionCommitmentsRestTransport, "rest", "true"), + (RegionCommitmentsClient, transports.RegionCommitmentsRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionCommitmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionCommitmentsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_commitments_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionCommitmentsClient, transports.RegionCommitmentsRestTransport, "rest"), +]) +def test_region_commitments_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionCommitmentsClient, transports.RegionCommitmentsRestTransport, "rest"), +]) +def test_region_commitments_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListRegionCommitmentsRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.CommitmentAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.CommitmentAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListRegionCommitmentsRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.CommitmentAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.CommitmentAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/commitments" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListRegionCommitmentsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.CommitmentAggregatedList( + items={ + 'a':compute.CommitmentsScopedList(), + 'b':compute.CommitmentsScopedList(), + 'c':compute.CommitmentsScopedList(), + }, + next_page_token='abc', + ), + compute.CommitmentAggregatedList( + items={}, + next_page_token='def', + ), + compute.CommitmentAggregatedList( + items={ + 'g':compute.CommitmentsScopedList(), + }, + next_page_token='ghi', + ), + compute.CommitmentAggregatedList( + items={ + 'h':compute.CommitmentsScopedList(), + 'i':compute.CommitmentsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.CommitmentAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.CommitmentsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.CommitmentsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.CommitmentsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionCommitmentRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "commitment": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Commitment( + category='category_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_timestamp='end_timestamp_value', + id=205, + kind='kind_value', + name='name_value', + plan='plan_value', + region='region_value', + self_link='self_link_value', + start_timestamp='start_timestamp_value', + status='status_value', + status_message='status_message_value', + type_='type__value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Commitment.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Commitment) + assert response.category == 'category_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_timestamp == 'end_timestamp_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.plan == 'plan_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_timestamp == 'start_timestamp_value' + assert response.status == 'status_value' + assert response.status_message == 'status_message_value' + assert response.type_ == 'type__value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionCommitmentRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "commitment": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Commitment() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Commitment.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "commitment": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + commitment='commitment_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/commitments/{commitment}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionCommitmentRequest(), + project='project_value', + region='region_value', + commitment='commitment_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionCommitmentRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["commitment_resource"] = compute.Commitment(category='category_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionCommitmentRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["commitment_resource"] = compute.Commitment(category='category_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + commitment_resource=compute.Commitment(category='category_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/commitments" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionCommitmentRequest(), + project='project_value', + region='region_value', + commitment_resource=compute.Commitment(category='category_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionCommitmentsRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.CommitmentList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.CommitmentList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionCommitmentsRequest): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.CommitmentList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.CommitmentList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/commitments" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionCommitmentsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.CommitmentList( + items=[ + compute.Commitment(), + compute.Commitment(), + compute.Commitment(), + ], + next_page_token='abc', + ), + compute.CommitmentList( + items=[], + next_page_token='def', + ), + compute.CommitmentList( + items=[ + compute.Commitment(), + ], + next_page_token='ghi', + ), + compute.CommitmentList( + items=[ + compute.Commitment(), + compute.Commitment(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.CommitmentList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Commitment) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionCommitmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.RegionCommitmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionCommitmentsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionCommitmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionCommitmentsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionCommitmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionCommitmentsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionCommitmentsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_commitments_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionCommitmentsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_commitments_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.region_commitments.transports.RegionCommitmentsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionCommitmentsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_commitments_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_commitments.transports.RegionCommitmentsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionCommitmentsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_commitments_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_commitments.transports.RegionCommitmentsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionCommitmentsTransport() + adc.assert_called_once() + + +def test_region_commitments_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionCommitmentsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_commitments_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionCommitmentsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_commitments_host_no_port(): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_commitments_host_with_port(): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + 
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionCommitmentsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionCommitmentsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionCommitmentsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionCommitmentsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionCommitmentsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionCommitmentsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionCommitmentsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionCommitmentsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionCommitmentsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionCommitmentsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionCommitmentsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = RegionCommitmentsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionCommitmentsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionCommitmentsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionCommitmentsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionCommitmentsTransport, '_prep_wrapped_messages') as prep: + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionCommitmentsTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionCommitmentsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionCommitmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_disk_types.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_disk_types.py new file mode 100644 index 000000000..f032c04e3 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_disk_types.py @@ -0,0 +1,941 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_disk_types import RegionDiskTypesClient +from google.cloud.compute_v1.services.region_disk_types import pagers +from google.cloud.compute_v1.services.region_disk_types import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionDiskTypesClient._get_default_mtls_endpoint(None) is None + assert RegionDiskTypesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionDiskTypesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionDiskTypesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionDiskTypesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionDiskTypesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionDiskTypesClient, +]) +def test_region_disk_types_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionDiskTypesRestTransport, "rest"), +]) +def test_region_disk_types_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionDiskTypesClient, +]) +def test_region_disk_types_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_disk_types_client_get_transport_class(): + transport = RegionDiskTypesClient.get_transport_class() + available_transports = [ + transports.RegionDiskTypesRestTransport, + ] + assert transport in available_transports + + transport = RegionDiskTypesClient.get_transport_class("rest") + assert transport == transports.RegionDiskTypesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionDiskTypesClient, transports.RegionDiskTypesRestTransport, "rest"), +]) +@mock.patch.object(RegionDiskTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionDiskTypesClient)) +def test_region_disk_types_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionDiskTypesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionDiskTypesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionDiskTypesClient, transports.RegionDiskTypesRestTransport, "rest", "true"), + (RegionDiskTypesClient, transports.RegionDiskTypesRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionDiskTypesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionDiskTypesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_disk_types_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionDiskTypesClient, transports.RegionDiskTypesRestTransport, "rest"), +]) +def test_region_disk_types_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionDiskTypesClient, transports.RegionDiskTypesRestTransport, "rest"), +]) +def test_region_disk_types_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionDiskTypeRequest): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DiskType( + creation_timestamp='creation_timestamp_value', + default_disk_size_gb=2097, + description='description_value', + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + valid_disk_size='valid_disk_size_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskType.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.DiskType) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.default_disk_size_gb == 2097 + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.valid_disk_size == 'valid_disk_size_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionDiskTypeRequest): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk_type": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DiskType() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskType.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "disk_type": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk_type='disk_type_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/diskTypes/{disk_type}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetRegionDiskTypeRequest(), + project='project_value', + region='region_value', + disk_type='disk_type_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionDiskTypesRequest): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionDiskTypeList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionDiskTypeList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionDiskTypesRequest): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionDiskTypeList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionDiskTypeList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/diskTypes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListRegionDiskTypesRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionDiskTypeList( + items=[ + compute.DiskType(), + compute.DiskType(), + compute.DiskType(), + ], + next_page_token='abc', + ), + compute.RegionDiskTypeList( + items=[], + next_page_token='def', + ), + compute.RegionDiskTypeList( + items=[ + compute.DiskType(), + ], + next_page_token='ghi', + ), + compute.RegionDiskTypeList( + items=[ + compute.DiskType(), + compute.DiskType(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionDiskTypeList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.DiskType) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.RegionDiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionDiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionDiskTypesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionDiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionDiskTypesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionDiskTypesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionDiskTypesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionDiskTypesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_disk_types_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionDiskTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_disk_types_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_disk_types.transports.RegionDiskTypesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionDiskTypesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_disk_types_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_disk_types.transports.RegionDiskTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionDiskTypesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 
'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_disk_types_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_disk_types.transports.RegionDiskTypesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionDiskTypesTransport() + adc.assert_called_once() + + +def test_region_disk_types_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionDiskTypesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_disk_types_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionDiskTypesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_disk_types_host_no_port(): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_disk_types_host_with_port(): + client = RegionDiskTypesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionDiskTypesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionDiskTypesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionDiskTypesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionDiskTypesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionDiskTypesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionDiskTypesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionDiskTypesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionDiskTypesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionDiskTypesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionDiskTypesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionDiskTypesClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = RegionDiskTypesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionDiskTypesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionDiskTypesClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionDiskTypesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionDiskTypesTransport, '_prep_wrapped_messages') as prep: + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionDiskTypesTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionDiskTypesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionDiskTypesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_disks.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_disks.py new file mode 100644 index 000000000..8c404e741 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_disks.py @@ -0,0 +1,2413 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_disks import RegionDisksClient +from google.cloud.compute_v1.services.region_disks import pagers +from google.cloud.compute_v1.services.region_disks import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionDisksClient._get_default_mtls_endpoint(None) is None + assert RegionDisksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionDisksClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionDisksClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionDisksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionDisksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionDisksClient, +]) +def test_region_disks_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionDisksRestTransport, "rest"), +]) +def test_region_disks_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionDisksClient, +]) +def test_region_disks_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_disks_client_get_transport_class(): + transport = RegionDisksClient.get_transport_class() + available_transports = [ + transports.RegionDisksRestTransport, + ] + assert transport in available_transports + + transport = RegionDisksClient.get_transport_class("rest") + assert transport == transports.RegionDisksRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionDisksClient, transports.RegionDisksRestTransport, "rest"), +]) +@mock.patch.object(RegionDisksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionDisksClient)) +def test_region_disks_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionDisksClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionDisksClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionDisksClient, transports.RegionDisksRestTransport, "rest", "true"), + (RegionDisksClient, transports.RegionDisksRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionDisksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionDisksClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_region_disks_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionDisksClient, transports.RegionDisksRestTransport, "rest"), +]) +def test_region_disks_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionDisksClient, transports.RegionDisksRestTransport, "rest"), +]) +def test_region_disks_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_resource_policies_rest(transport: str = 'rest', request_type=compute.AddResourcePoliciesRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["region_disks_add_resource_policies_request_resource"] = compute.RegionDisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a 
response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_resource_policies(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_resource_policies_rest_bad_request(transport: str = 'rest', request_type=compute.AddResourcePoliciesRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["region_disks_add_resource_policies_request_resource"] = compute.RegionDisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_resource_policies(request) + + +def test_add_resource_policies_rest_from_dict(): + test_add_resource_policies_rest(request_type=dict) + + +def test_add_resource_policies_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk='disk_value', + region_disks_add_resource_policies_request_resource=compute.RegionDisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + mock_args.update(sample_request) + client.add_resource_policies(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/addResourcePolicies" % client.transport._host, args[1]) + + +def test_add_resource_policies_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_resource_policies( + compute.AddResourcePoliciesRegionDiskRequest(), + project='project_value', + region='region_value', + disk='disk_value', + region_disks_add_resource_policies_request_resource=compute.RegionDisksAddResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + + +def test_create_snapshot_rest(transport: str = 'rest', request_type=compute.CreateSnapshotRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["snapshot_resource"] = compute.Snapshot(auto_created=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_snapshot(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_create_snapshot_rest_bad_request(transport: str = 'rest', request_type=compute.CreateSnapshotRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["snapshot_resource"] = compute.Snapshot(auto_created=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_snapshot(request) + + +def test_create_snapshot_rest_from_dict(): + test_create_snapshot_rest(request_type=dict) + + +def test_create_snapshot_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk='disk_value', + snapshot_resource=compute.Snapshot(auto_created=True), + ) + mock_args.update(sample_request) + client.create_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/createSnapshot" % client.transport._host, args[1]) + + +def test_create_snapshot_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_snapshot( + compute.CreateSnapshotRegionDiskRequest(), + project='project_value', + region='region_value', + disk='disk_value', + snapshot_resource=compute.Snapshot(auto_created=True), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk='disk_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionDiskRequest(), + project='project_value', + region='region_value', + disk='disk_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Disk( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + last_attach_timestamp='last_attach_timestamp_value', + last_detach_timestamp='last_detach_timestamp_value', + license_codes=[1360], + licenses=['licenses_value'], + location_hint='location_hint_value', + name='name_value', + options='options_value', + physical_block_size_bytes=2663, + provisioned_iops=1740, + region='region_value', + replica_zones=['replica_zones_value'], + resource_policies=['resource_policies_value'], + satisfies_pzs=True, + self_link='self_link_value', + size_gb=739, + source_disk='source_disk_value', + source_disk_id='source_disk_id_value', + source_image='source_image_value', + source_image_id='source_image_id_value', + source_snapshot='source_snapshot_value', + source_snapshot_id='source_snapshot_id_value', + source_storage_object='source_storage_object_value', + status='status_value', + type_='type__value', + users=['users_value'], + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Disk.to_json(return_value) + 
response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Disk) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.last_attach_timestamp == 'last_attach_timestamp_value' + assert response.last_detach_timestamp == 'last_detach_timestamp_value' + assert response.license_codes == [1360] + assert response.licenses == ['licenses_value'] + assert response.location_hint == 'location_hint_value' + assert response.name == 'name_value' + assert response.options == 'options_value' + assert response.physical_block_size_bytes == 2663 + assert response.provisioned_iops == 1740 + assert response.region == 'region_value' + assert response.replica_zones == ['replica_zones_value'] + assert response.resource_policies == ['resource_policies_value'] + assert response.satisfies_pzs is True + assert response.self_link == 'self_link_value' + assert response.size_gb == 739 + assert response.source_disk == 'source_disk_value' + assert response.source_disk_id == 'source_disk_id_value' + assert response.source_image == 'source_image_value' + assert response.source_image_id == 'source_image_id_value' + assert response.source_snapshot == 'source_snapshot_value' + assert response.source_snapshot_id == 'source_snapshot_id_value' + assert response.source_storage_object == 'source_storage_object_value' + assert response.status == 'status_value' + assert response.type_ == 'type__value' + assert response.users == ['users_value'] + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionDiskRequest): + client = RegionDisksClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Disk() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Disk.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk='disk_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionDiskRequest(), + project='project_value', + region='region_value', + disk='disk_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyRegionDiskRequest(), + project='project_value', + region='region_value', + resource='resource_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["disk_resource"] = compute.Disk(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["disk_resource"] = compute.Disk(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk_resource=compute.Disk(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionDiskRequest(), + project='project_value', + region='region_value', + disk_resource=compute.Disk(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionDisksRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.DiskList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionDisksRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DiskList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DiskList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionDisksRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.DiskList( + items=[ + compute.Disk(), + compute.Disk(), + compute.Disk(), + ], + next_page_token='abc', + ), + compute.DiskList( + items=[], + next_page_token='def', + ), + compute.DiskList( + items=[ + compute.Disk(), + ], + next_page_token='ghi', + ), + compute.DiskList( + items=[ + compute.Disk(), + compute.Disk(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.DiskList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Disk) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_remove_resource_policies_rest(transport: str = 'rest', request_type=compute.RemoveResourcePoliciesRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["region_disks_remove_resource_policies_request_resource"] = compute.RegionDisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_resource_policies(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_resource_policies_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveResourcePoliciesRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["region_disks_remove_resource_policies_request_resource"] = compute.RegionDisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_resource_policies(request) + + +def test_remove_resource_policies_rest_from_dict(): + test_remove_resource_policies_rest(request_type=dict) + + +def test_remove_resource_policies_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk='disk_value', + region_disks_remove_resource_policies_request_resource=compute.RegionDisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + mock_args.update(sample_request) + client.remove_resource_policies(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/removeResourcePolicies" % client.transport._host, args[1]) + + +def test_remove_resource_policies_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_resource_policies( + compute.RemoveResourcePoliciesRegionDiskRequest(), + project='project_value', + region='region_value', + disk='disk_value', + region_disks_remove_resource_policies_request_resource=compute.RegionDisksRemoveResourcePoliciesRequest(resource_policies=['resource_policies_value']), + ) + + +def test_resize_rest(transport: str = 'rest', request_type=compute.ResizeRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["region_disks_resize_request_resource"] = compute.RegionDisksResizeRequest(size_gb=739) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.resize(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_resize_rest_bad_request(transport: str = 'rest', request_type=compute.ResizeRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "disk": "sample3"} + request_init["region_disks_resize_request_resource"] = compute.RegionDisksResizeRequest(size_gb=739) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.resize(request) + + +def test_resize_rest_from_dict(): + test_resize_rest(request_type=dict) + + +def test_resize_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "disk": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + disk='disk_value', + region_disks_resize_request_resource=compute.RegionDisksResizeRequest(size_gb=739), + ) + mock_args.update(sample_request) + client.resize(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{disk}/resize" % client.transport._host, args[1]) + + +def test_resize_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resize( + compute.ResizeRegionDiskRequest(), + project='project_value', + region='region_value', + disk='disk_value', + region_disks_resize_request_resource=compute.RegionDisksResizeRequest(size_gb=739), + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyRegionDiskRequest(), + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_labels_request_resource"] = compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_labels(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_labels_request_resource"] = compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + region_set_labels_request_resource=compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsRegionDiskRequest(), + project='project_value', + region='region_value', + resource='resource_value', + region_set_labels_request_resource=compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsRegionDiskRequest): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/disks/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsRegionDiskRequest(), + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.RegionDisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionDisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionDisksClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionDisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionDisksClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionDisksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionDisksClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionDisksRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_disks_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionDisksTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_disks_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.region_disks.transports.RegionDisksTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionDisksTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'add_resource_policies', + 'create_snapshot', + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'remove_resource_policies', + 'resize', + 'set_iam_policy', + 'set_labels', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_disks_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_disks.transports.RegionDisksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionDisksTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_disks_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_disks.transports.RegionDisksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionDisksTransport() + adc.assert_called_once() + + +def test_region_disks_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionDisksClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_disks_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionDisksRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_disks_host_no_port(): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_disks_host_with_port(): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionDisksClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionDisksClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionDisksClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionDisksClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionDisksClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionDisksClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionDisksClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionDisksClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionDisksClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionDisksClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionDisksClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionDisksClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionDisksClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionDisksClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionDisksClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionDisksTransport, '_prep_wrapped_messages') as prep: + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionDisksTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionDisksClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionDisksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_health_check_services.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_health_check_services.py new file mode 100644 index 000000000..d35fa5266 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_health_check_services.py @@ -0,0 +1,1405 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.region_health_check_services import RegionHealthCheckServicesClient
+from google.cloud.compute_v1.services.region_health_check_services import pagers
+from google.cloud.compute_v1.services.region_health_check_services import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"  # dummy (cert, key) PEM pair for mTLS tests
+
+
+# If the default endpoint is localhost, the default mTLS endpoint would be the
+# same; this helper rewrites the client's default endpoint so it derives a
+# distinct mTLS endpoint, which the endpoint-selection tests below rely on.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionHealthCheckServicesClient._get_default_mtls_endpoint(None) is None + assert RegionHealthCheckServicesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionHealthCheckServicesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionHealthCheckServicesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionHealthCheckServicesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionHealthCheckServicesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionHealthCheckServicesClient, +]) +def test_region_health_check_services_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionHealthCheckServicesRestTransport, "rest"), +]) +def test_region_health_check_services_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + 
creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionHealthCheckServicesClient, +]) +def test_region_health_check_services_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_health_check_services_client_get_transport_class(): + transport = RegionHealthCheckServicesClient.get_transport_class() + available_transports = [ + transports.RegionHealthCheckServicesRestTransport, + ] + assert transport in available_transports + + transport = RegionHealthCheckServicesClient.get_transport_class("rest") + assert transport == transports.RegionHealthCheckServicesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionHealthCheckServicesClient, transports.RegionHealthCheckServicesRestTransport, "rest"), +]) +@mock.patch.object(RegionHealthCheckServicesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionHealthCheckServicesClient)) +def test_region_health_check_services_client_client_options(client_class, 
transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RegionHealthCheckServicesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionHealthCheckServicesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionHealthCheckServicesClient, transports.RegionHealthCheckServicesRestTransport, "rest", "true"), + (RegionHealthCheckServicesClient, transports.RegionHealthCheckServicesRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionHealthCheckServicesClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(RegionHealthCheckServicesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_health_check_services_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionHealthCheckServicesClient, transports.RegionHealthCheckServicesRestTransport, "rest"), +]) +def test_region_health_check_services_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionHealthCheckServicesClient, transports.RegionHealthCheckServicesRestTransport, "rest"), +]) +def test_region_health_check_services_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + health_check_service='health_check_service_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionHealthCheckServiceRequest(), + project='project_value', + region='region_value', + health_check_service='health_check_service_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.HealthCheckService( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + health_checks=['health_checks_value'], + health_status_aggregation_policy='health_status_aggregation_policy_value', + id=205, + kind='kind_value', + name='name_value', + network_endpoint_groups=['network_endpoint_groups_value'], + notification_endpoints=['notification_endpoints_value'], + region='region_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckService.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.HealthCheckService) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.health_checks == ['health_checks_value'] + assert response.health_status_aggregation_policy == 'health_status_aggregation_policy_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network_endpoint_groups == ['network_endpoint_groups_value'] + assert response.notification_endpoints == ['notification_endpoints_value'] + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", 
"health_check_service": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.HealthCheckService() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckService.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + health_check_service='health_check_service_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionHealthCheckServiceRequest(), + project='project_value', + region='region_value', + health_check_service='health_check_service_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["health_check_service_resource"] = compute.HealthCheckService(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["health_check_service_resource"] = compute.HealthCheckService(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + health_check_service_resource=compute.HealthCheckService(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionHealthCheckServiceRequest(), + project='project_value', + region='region_value', + health_check_service_resource=compute.HealthCheckService(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionHealthCheckServicesRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.HealthCheckServicesList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckServicesList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionHealthCheckServicesRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.HealthCheckServicesList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckServicesList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionHealthCheckServicesRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.HealthCheckServicesList( + items=[ + compute.HealthCheckService(), + compute.HealthCheckService(), + compute.HealthCheckService(), + ], + next_page_token='abc', + ), + compute.HealthCheckServicesList( + items=[], + next_page_token='def', + ), + compute.HealthCheckServicesList( + items=[ + compute.HealthCheckService(), + ], + next_page_token='ghi', + ), + compute.HealthCheckServicesList( + items=[ + compute.HealthCheckService(), + compute.HealthCheckService(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.HealthCheckServicesList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.HealthCheckService) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + request_init["health_check_service_resource"] = compute.HealthCheckService(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + 
# Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRegionHealthCheckServiceRequest): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + request_init["health_check_service_resource"] = compute.HealthCheckService(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "health_check_service": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + health_check_service='health_check_service_value', + health_check_service_resource=compute.HealthCheckService(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthCheckServices/{health_check_service}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchRegionHealthCheckServiceRequest(), + project='project_value', + region='region_value', + health_check_service='health_check_service_value', + health_check_service_resource=compute.HealthCheckService(creation_timestamp='creation_timestamp_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionHealthCheckServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionHealthCheckServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionHealthCheckServicesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.RegionHealthCheckServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionHealthCheckServicesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionHealthCheckServicesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionHealthCheckServicesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionHealthCheckServicesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_health_check_services_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionHealthCheckServicesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_health_check_services_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_health_check_services.transports.RegionHealthCheckServicesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionHealthCheckServicesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_health_check_services_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_health_check_services.transports.RegionHealthCheckServicesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionHealthCheckServicesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_health_check_services_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_health_check_services.transports.RegionHealthCheckServicesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionHealthCheckServicesTransport() + adc.assert_called_once() + + +def test_region_health_check_services_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionHealthCheckServicesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_health_check_services_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionHealthCheckServicesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_health_check_services_host_no_port(): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_health_check_services_host_with_port(): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionHealthCheckServicesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionHealthCheckServicesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionHealthCheckServicesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionHealthCheckServicesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionHealthCheckServicesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionHealthCheckServicesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionHealthCheckServicesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionHealthCheckServicesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionHealthCheckServicesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionHealthCheckServicesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionHealthCheckServicesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionHealthCheckServicesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionHealthCheckServicesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionHealthCheckServicesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionHealthCheckServicesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionHealthCheckServicesTransport, '_prep_wrapped_messages') as prep: + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionHealthCheckServicesTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionHealthCheckServicesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = 
RegionHealthCheckServicesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_health_checks.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_health_checks.py new file mode 100644 index 000000000..87ed27e1a --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_health_checks.py @@ -0,0 +1,1561 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_health_checks import RegionHealthChecksClient +from google.cloud.compute_v1.services.region_health_checks import pagers +from google.cloud.compute_v1.services.region_health_checks import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionHealthChecksClient._get_default_mtls_endpoint(None) is None + assert RegionHealthChecksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionHealthChecksClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionHealthChecksClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionHealthChecksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionHealthChecksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionHealthChecksClient, +]) +def test_region_health_checks_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionHealthChecksRestTransport, "rest"), +]) +def test_region_health_checks_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionHealthChecksClient, +]) +def test_region_health_checks_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_health_checks_client_get_transport_class(): + transport = RegionHealthChecksClient.get_transport_class() + available_transports = [ + transports.RegionHealthChecksRestTransport, + ] + assert transport in available_transports + + transport = RegionHealthChecksClient.get_transport_class("rest") + assert transport == transports.RegionHealthChecksRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionHealthChecksClient, transports.RegionHealthChecksRestTransport, "rest"), +]) +@mock.patch.object(RegionHealthChecksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionHealthChecksClient)) +def test_region_health_checks_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionHealthChecksClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionHealthChecksClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionHealthChecksClient, transports.RegionHealthChecksRestTransport, "rest", "true"), + (RegionHealthChecksClient, transports.RegionHealthChecksRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionHealthChecksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionHealthChecksClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_health_checks_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionHealthChecksClient, transports.RegionHealthChecksRestTransport, "rest"), +]) +def test_region_health_checks_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionHealthChecksClient, transports.RegionHealthChecksRestTransport, "rest"), +]) +def test_region_health_checks_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete(request)
+
+
+def test_delete_rest_from_dict():
+    # Re-run the positive delete test passing the request as a plain dict,
+    # confirming the client coerces dict-typed requests into the proto message.
+    test_delete_rest(request_type=dict)
+
+
+def test_delete_rest_flattened(transport: str = 'rest'):
+    # Exercise the flattened-argument calling form of delete() and verify the
+    # mocked HTTP call targets the documented REST URI for this method.
+    client = RegionHealthChecksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "health_check": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            health_check='health_check_value',
+        )
+        mock_args.update(sample_request)
+        client.delete(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionHealthCheckRequest(), + project='project_value', + region='region_value', + health_check='health_check_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.HealthCheck( + check_interval_sec=1884, + creation_timestamp='creation_timestamp_value', + description='description_value', + healthy_threshold=1819, + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + timeout_sec=1185, + type_='type__value', + unhealthy_threshold=2046, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheck.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.HealthCheck) + assert response.check_interval_sec == 1884 + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.healthy_threshold == 1819 + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.timeout_sec == 1185 + assert response.type_ == 'type__value' + assert response.unhealthy_threshold == 2046 + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get(request)
+
+
+def test_get_rest_from_dict():
+    # Re-run the positive get test passing the request as a plain dict,
+    # confirming the client coerces dict-typed requests into the proto message.
+    test_get_rest(request_type=dict)
+
+
+def test_get_rest_flattened(transport: str = 'rest'):
+    # Exercise the flattened-argument calling form of get() and verify the
+    # mocked HTTP call targets the documented REST URI for this method.
+    client = RegionHealthChecksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.HealthCheck()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.HealthCheck.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "health_check": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            health_check='health_check_value',
+        )
+        mock_args.update(sample_request)
+        client.get(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionHealthCheckRequest(), + project='project_value', + region='region_value', + health_check='health_check_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthChecks" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionHealthCheckRequest(), + project='project_value', + region='region_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionHealthChecksRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.HealthCheckList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionHealthChecksRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.HealthCheckList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.HealthCheckList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthChecks" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionHealthChecksRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.HealthCheckList( + items=[ + compute.HealthCheck(), + compute.HealthCheck(), + compute.HealthCheck(), + ], + next_page_token='abc', + ), + compute.HealthCheckList( + items=[], + next_page_token='def', + ), + compute.HealthCheckList( + items=[ + compute.HealthCheck(), + ], + next_page_token='ghi', + ), + compute.HealthCheckList( + items=[ + compute.HealthCheck(), + compute.HealthCheck(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.HealthCheckList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.HealthCheck) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchRegionHealthCheckRequest(), + project='project_value', + region='region_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateRegionHealthCheckRequest): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + request_init["health_check_resource"] = compute.HealthCheck(check_interval_sec=1884) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "health_check": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/healthChecks/{health_check}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateRegionHealthCheckRequest(), + project='project_value', + region='region_value', + health_check='health_check_value', + health_check_resource=compute.HealthCheck(check_interval_sec=1884), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionHealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionHealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionHealthChecksClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionHealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionHealthChecksClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.RegionHealthChecksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionHealthChecksClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionHealthChecksRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_health_checks_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionHealthChecksTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_health_checks_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_health_checks.transports.RegionHealthChecksTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionHealthChecksTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'update', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_health_checks_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_health_checks.transports.RegionHealthChecksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionHealthChecksTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_health_checks_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_health_checks.transports.RegionHealthChecksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionHealthChecksTransport() + adc.assert_called_once() + + +def test_region_health_checks_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionHealthChecksClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_health_checks_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionHealthChecksRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_health_checks_host_no_port(): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_health_checks_host_with_port(): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionHealthChecksClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionHealthChecksClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionHealthChecksClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionHealthChecksClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionHealthChecksClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionHealthChecksClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionHealthChecksClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionHealthChecksClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionHealthChecksClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionHealthChecksClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionHealthChecksClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionHealthChecksClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionHealthChecksClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionHealthChecksClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionHealthChecksClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionHealthChecksTransport, '_prep_wrapped_messages') as prep: + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionHealthChecksTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionHealthChecksClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionHealthChecksClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instance_group_managers.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instance_group_managers.py new file mode 100644 index 000000000..80524da9b --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instance_group_managers.py @@ -0,0 +1,3636 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_instance_group_managers import RegionInstanceGroupManagersClient +from google.cloud.compute_v1.services.region_instance_group_managers import pagers +from google.cloud.compute_v1.services.region_instance_group_managers import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionInstanceGroupManagersClient._get_default_mtls_endpoint(None) is None + assert RegionInstanceGroupManagersClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionInstanceGroupManagersClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionInstanceGroupManagersClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionInstanceGroupManagersClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionInstanceGroupManagersClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionInstanceGroupManagersClient, +]) +def test_region_instance_group_managers_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionInstanceGroupManagersRestTransport, "rest"), +]) +def test_region_instance_group_managers_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionInstanceGroupManagersClient, +]) +def test_region_instance_group_managers_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_instance_group_managers_client_get_transport_class(): + transport = RegionInstanceGroupManagersClient.get_transport_class() + available_transports = [ + transports.RegionInstanceGroupManagersRestTransport, + ] + assert transport in available_transports + + transport = RegionInstanceGroupManagersClient.get_transport_class("rest") + assert transport == transports.RegionInstanceGroupManagersRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstanceGroupManagersClient, transports.RegionInstanceGroupManagersRestTransport, "rest"), +]) +@mock.patch.object(RegionInstanceGroupManagersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionInstanceGroupManagersClient)) +def 
test_region_instance_group_managers_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RegionInstanceGroupManagersClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionInstanceGroupManagersClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionInstanceGroupManagersClient, transports.RegionInstanceGroupManagersRestTransport, "rest", "true"), + (RegionInstanceGroupManagersClient, transports.RegionInstanceGroupManagersRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionInstanceGroupManagersClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(RegionInstanceGroupManagersClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_instance_group_managers_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstanceGroupManagersClient, transports.RegionInstanceGroupManagersRestTransport, "rest"), +]) +def test_region_instance_group_managers_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstanceGroupManagersClient, transports.RegionInstanceGroupManagersRestTransport, "rest"), +]) +def test_region_instance_group_managers_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_abandon_instances_rest(transport: str = 'rest', request_type=compute.AbandonInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_abandon_instances_request_resource"] = compute.RegionInstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.abandon_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_abandon_instances_rest_bad_request(transport: str = 'rest', request_type=compute.AbandonInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_abandon_instances_request_resource"] = compute.RegionInstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.abandon_instances(request) + + +def test_abandon_instances_rest_from_dict(): + test_abandon_instances_rest(request_type=dict) + + +def test_abandon_instances_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_abandon_instances_request_resource=compute.RegionInstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']), + ) + mock_args.update(sample_request) + client.abandon_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/abandonInstances" % client.transport._host, args[1]) + + +def test_abandon_instances_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.abandon_instances( + compute.AbandonInstancesRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_abandon_instances_request_resource=compute.RegionInstanceGroupManagersAbandonInstancesRequest(instances=['instances_value']), + ) + + +def test_apply_updates_to_instances_rest(transport: str = 'rest', request_type=compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_apply_updates_request_resource"] = compute.RegionInstanceGroupManagersApplyUpdatesRequest(all_instances=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.apply_updates_to_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_apply_updates_to_instances_rest_bad_request(transport: str = 'rest', request_type=compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_apply_updates_request_resource"] = compute.RegionInstanceGroupManagersApplyUpdatesRequest(all_instances=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.apply_updates_to_instances(request) + + +def test_apply_updates_to_instances_rest_from_dict(): + test_apply_updates_to_instances_rest(request_type=dict) + + +def test_apply_updates_to_instances_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_apply_updates_request_resource=compute.RegionInstanceGroupManagersApplyUpdatesRequest(all_instances=True), + ) + mock_args.update(sample_request) + client.apply_updates_to_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/applyUpdatesToInstances" % client.transport._host, args[1]) + + +def test_apply_updates_to_instances_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.apply_updates_to_instances( + compute.ApplyUpdatesToInstancesRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_apply_updates_request_resource=compute.RegionInstanceGroupManagersApplyUpdatesRequest(all_instances=True), + ) + + +def test_create_instances_rest(transport: str = 'rest', request_type=compute.CreateInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_create_instances_request_resource"] = compute.RegionInstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_create_instances_rest_bad_request(transport: str = 'rest', request_type=compute.CreateInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_create_instances_request_resource"] = compute.RegionInstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_instances(request) + + +def test_create_instances_rest_from_dict(): + test_create_instances_rest(request_type=dict) + + +def test_create_instances_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_create_instances_request_resource=compute.RegionInstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + mock_args.update(sample_request) + client.create_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/createInstances" % client.transport._host, args[1]) + + +def test_create_instances_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_instances( + compute.CreateInstancesRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_create_instances_request_resource=compute.RegionInstanceGroupManagersCreateInstancesRequest(instances=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_delete_instances_rest(transport: str = 'rest', request_type=compute.DeleteInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_delete_instances_request_resource"] = compute.RegionInstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_instances_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_delete_instances_request_resource"] = compute.RegionInstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_instances(request) + + +def test_delete_instances_rest_from_dict(): + test_delete_instances_rest(request_type=dict) + + +def test_delete_instances_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_delete_instances_request_resource=compute.RegionInstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']), + ) + mock_args.update(sample_request) + client.delete_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/deleteInstances" % client.transport._host, args[1]) + + +def test_delete_instances_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_instances( + compute.DeleteInstancesRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_delete_instances_request_resource=compute.RegionInstanceGroupManagersDeleteInstancesRequest(instances=['instances_value']), + ) + + +def test_delete_per_instance_configs_rest(transport: str = 'rest', request_type=compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_manager_delete_instance_config_req_resource"] = compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(names=['names_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_per_instance_configs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_manager_delete_instance_config_req_resource"] = compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(names=['names_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_per_instance_configs(request) + + +def test_delete_per_instance_configs_rest_from_dict(): + test_delete_per_instance_configs_rest(request_type=dict) + + +def test_delete_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_manager_delete_instance_config_req_resource=compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(names=['names_value']), + ) + mock_args.update(sample_request) + client.delete_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/deletePerInstanceConfigs" % client.transport._host, args[1]) + + +def test_delete_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_per_instance_configs( + compute.DeletePerInstanceConfigsRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_manager_delete_instance_config_req_resource=compute.RegionInstanceGroupManagerDeleteInstanceConfigReq(names=['names_value']), + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroupManager( + base_instance_name='base_instance_name_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + instance_group='instance_group_value', + instance_template='instance_template_value', + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + target_pools=['target_pools_value'], + target_size=1185, + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManager.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.InstanceGroupManager) + assert response.base_instance_name == 'base_instance_name_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.instance_group == 'instance_group_value' + assert response.instance_template == 'instance_template_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.target_pools == ['target_pools_value'] + assert response.target_size == 1185 + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", 
"instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroupManager() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroupManager.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupManagerList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagerList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.RegionInstanceGroupManagerList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagerList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionInstanceGroupManagersRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionInstanceGroupManagerList( + items=[ + compute.InstanceGroupManager(), + compute.InstanceGroupManager(), + compute.InstanceGroupManager(), + ], + next_page_token='abc', + ), + compute.RegionInstanceGroupManagerList( + items=[], + next_page_token='def', + ), + compute.RegionInstanceGroupManagerList( + items=[ + compute.InstanceGroupManager(), + ], + next_page_token='ghi', + ), + compute.RegionInstanceGroupManagerList( + items=[ + compute.InstanceGroupManager(), + compute.InstanceGroupManager(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionInstanceGroupManagerList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceGroupManager) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_errors_rest(transport: str = 'rest', request_type=compute.ListErrorsRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake 
a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupManagersListErrorsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagersListErrorsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_errors(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListErrorsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_errors_rest_bad_request(transport: str = 'rest', request_type=compute.ListErrorsRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_errors(request) + + +def test_list_errors_rest_from_dict(): + test_list_errors_rest(request_type=dict) + + +def test_list_errors_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupManagersListErrorsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagersListErrorsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.list_errors(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listErrors" % client.transport._host, args[1]) + + +def test_list_errors_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_errors( + compute.ListErrorsRegionInstanceGroupManagersRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_list_errors_rest_pager(): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionInstanceGroupManagersListErrorsResponse( + items=[ + compute.InstanceManagedByIgmError(), + compute.InstanceManagedByIgmError(), + compute.InstanceManagedByIgmError(), + ], + next_page_token='abc', + ), + compute.RegionInstanceGroupManagersListErrorsResponse( + items=[], + next_page_token='def', + ), + compute.RegionInstanceGroupManagersListErrorsResponse( + items=[ + compute.InstanceManagedByIgmError(), + ], + next_page_token='ghi', + ), + compute.RegionInstanceGroupManagersListErrorsResponse( + items=[ + compute.InstanceManagedByIgmError(), + compute.InstanceManagedByIgmError(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionInstanceGroupManagersListErrorsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + pager = client.list_errors(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceManagedByIgmError) + for i in results) + + pages = list(client.list_errors(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_managed_instances_rest(transport: str = 'rest', request_type=compute.ListManagedInstancesRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupManagersListInstancesResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagersListInstancesResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_managed_instances(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListManagedInstancesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_managed_instances_rest_bad_request(transport: str = 'rest', request_type=compute.ListManagedInstancesRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_managed_instances(request) + + +def test_list_managed_instances_rest_from_dict(): + test_list_managed_instances_rest(request_type=dict) + + +def test_list_managed_instances_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupManagersListInstancesResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagersListInstancesResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.list_managed_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listManagedInstances" % client.transport._host, args[1]) + + +def test_list_managed_instances_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_managed_instances( + compute.ListManagedInstancesRegionInstanceGroupManagersRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_list_managed_instances_rest_pager(): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionInstanceGroupManagersListInstancesResponse( + managed_instances=[ + compute.ManagedInstance(), + compute.ManagedInstance(), + compute.ManagedInstance(), + ], + next_page_token='abc', + ), + compute.RegionInstanceGroupManagersListInstancesResponse( + managed_instances=[], + next_page_token='def', + ), + compute.RegionInstanceGroupManagersListInstancesResponse( + managed_instances=[ + compute.ManagedInstance(), + ], + next_page_token='ghi', + ), + compute.RegionInstanceGroupManagersListInstancesResponse( + managed_instances=[ + compute.ManagedInstance(), + compute.ManagedInstance(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionInstanceGroupManagersListInstancesResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + pager = client.list_managed_instances(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.ManagedInstance) + for i in results) + + pages = list(client.list_managed_instances(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_per_instance_configs_rest(transport: str = 'rest', request_type=compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy 
transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_per_instance_configs(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPerInstanceConfigsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_per_instance_configs(request) + + +def test_list_per_instance_configs_rest_from_dict(): + test_list_per_instance_configs_rest(request_type=dict) + + +def test_list_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupManagersListInstanceConfigsResp.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + mock_args.update(sample_request) + client.list_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/listPerInstanceConfigs" % client.transport._host, args[1]) + + +def test_list_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_per_instance_configs( + compute.ListPerInstanceConfigsRegionInstanceGroupManagersRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + ) + + +def test_list_per_instance_configs_rest_pager(): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionInstanceGroupManagersListInstanceConfigsResp( + items=[ + compute.PerInstanceConfig(), + compute.PerInstanceConfig(), + compute.PerInstanceConfig(), + ], + next_page_token='abc', + ), + compute.RegionInstanceGroupManagersListInstanceConfigsResp( + items=[], + next_page_token='def', + ), + compute.RegionInstanceGroupManagersListInstanceConfigsResp( + items=[ + compute.PerInstanceConfig(), + ], + next_page_token='ghi', + ), + compute.RegionInstanceGroupManagersListInstanceConfigsResp( + items=[ + compute.PerInstanceConfig(), + compute.PerInstanceConfig(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionInstanceGroupManagersListInstanceConfigsResp.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + pager = client.list_per_instance_configs(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.PerInstanceConfig) + for i in results) + + pages = list(client.list_per_instance_configs(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": 
"sample2", "instance_group_manager": "sample3"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["instance_group_manager_resource"] = compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + instance_group_manager_resource=compute.InstanceGroupManager(auto_healing_policies=[compute.InstanceGroupManagerAutoHealingPolicy(health_check='health_check_value')]), + ) + + +def test_patch_per_instance_configs_rest(transport: str = 'rest', request_type=compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_manager_patch_instance_config_req_resource"] = compute.RegionInstanceGroupManagerPatchInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch_per_instance_configs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_manager_patch_instance_config_req_resource"] = compute.RegionInstanceGroupManagerPatchInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch_per_instance_configs(request) + + +def test_patch_per_instance_configs_rest_from_dict(): + test_patch_per_instance_configs_rest(request_type=dict) + + +def test_patch_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_manager_patch_instance_config_req_resource=compute.RegionInstanceGroupManagerPatchInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + mock_args.update(sample_request) + client.patch_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/patchPerInstanceConfigs" % client.transport._host, args[1]) + + +def test_patch_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch_per_instance_configs( + compute.PatchPerInstanceConfigsRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_manager_patch_instance_config_req_resource=compute.RegionInstanceGroupManagerPatchInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + + +def test_recreate_instances_rest(transport: str = 'rest', request_type=compute.RecreateInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_recreate_request_resource"] = compute.RegionInstanceGroupManagersRecreateRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.recreate_instances(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_recreate_instances_rest_bad_request(transport: str = 'rest', request_type=compute.RecreateInstancesRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_recreate_request_resource"] = compute.RegionInstanceGroupManagersRecreateRequest(instances=['instances_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.recreate_instances(request) + + +def test_recreate_instances_rest_from_dict(): + test_recreate_instances_rest(request_type=dict) + + +def test_recreate_instances_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_recreate_request_resource=compute.RegionInstanceGroupManagersRecreateRequest(instances=['instances_value']), + ) + mock_args.update(sample_request) + client.recreate_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/recreateInstances" % client.transport._host, args[1]) + + +def test_recreate_instances_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.recreate_instances( + compute.RecreateInstancesRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_recreate_request_resource=compute.RegionInstanceGroupManagersRecreateRequest(instances=['instances_value']), + ) + + +def test_resize_rest(transport: str = 'rest', request_type=compute.ResizeRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.resize(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_resize_rest_bad_request(transport: str = 'rest', request_type=compute.ResizeRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.resize(request) + + +def test_resize_rest_from_dict(): + test_resize_rest(request_type=dict) + + +def test_resize_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + size=443, + ) + mock_args.update(sample_request) + client.resize(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/resize" % client.transport._host, args[1]) + + +def test_resize_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resize( + compute.ResizeRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + size=443, + ) + + +def test_set_instance_template_rest(transport: str = 'rest', request_type=compute.SetInstanceTemplateRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_set_template_request_resource"] = compute.RegionInstanceGroupManagersSetTemplateRequest(instance_template='instance_template_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_instance_template(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_instance_template_rest_bad_request(transport: str = 'rest', request_type=compute.SetInstanceTemplateRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_set_template_request_resource"] = compute.RegionInstanceGroupManagersSetTemplateRequest(instance_template='instance_template_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_instance_template(request) + + +def test_set_instance_template_rest_from_dict(): + test_set_instance_template_rest(request_type=dict) + + +def test_set_instance_template_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_set_template_request_resource=compute.RegionInstanceGroupManagersSetTemplateRequest(instance_template='instance_template_value'), + ) + mock_args.update(sample_request) + client.set_instance_template(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/setInstanceTemplate" % client.transport._host, args[1]) + + +def test_set_instance_template_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_instance_template( + compute.SetInstanceTemplateRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_set_template_request_resource=compute.RegionInstanceGroupManagersSetTemplateRequest(instance_template='instance_template_value'), + ) + + +def test_set_target_pools_rest(transport: str = 'rest', request_type=compute.SetTargetPoolsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_set_target_pools_request_resource"] = compute.RegionInstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_target_pools(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_target_pools_rest_bad_request(transport: str = 'rest', request_type=compute.SetTargetPoolsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_managers_set_target_pools_request_resource"] = compute.RegionInstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_target_pools(request) + + +def test_set_target_pools_rest_from_dict(): + test_set_target_pools_rest(request_type=dict) + + +def test_set_target_pools_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_set_target_pools_request_resource=compute.RegionInstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_target_pools(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/setTargetPools" % client.transport._host, args[1]) + + +def test_set_target_pools_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_target_pools( + compute.SetTargetPoolsRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_managers_set_target_pools_request_resource=compute.RegionInstanceGroupManagersSetTargetPoolsRequest(fingerprint='fingerprint_value'), + ) + + +def test_update_per_instance_configs_rest(transport: str = 'rest', request_type=compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_manager_update_instance_config_req_resource"] = compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_per_instance_configs(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_per_instance_configs_rest_bad_request(transport: str = 'rest', request_type=compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + request_init["region_instance_group_manager_update_instance_config_req_resource"] = compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_per_instance_configs(request) + + +def test_update_per_instance_configs_rest_from_dict(): + test_update_per_instance_configs_rest(request_type=dict) + + +def test_update_per_instance_configs_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group_manager": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_manager_update_instance_config_req_resource=compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + mock_args.update(sample_request) + client.update_per_instance_configs(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroupManagers/{instance_group_manager}/updatePerInstanceConfigs" % client.transport._host, args[1]) + + +def test_update_per_instance_configs_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_per_instance_configs( + compute.UpdatePerInstanceConfigsRegionInstanceGroupManagerRequest(), + project='project_value', + region='region_value', + instance_group_manager='instance_group_manager_value', + region_instance_group_manager_update_instance_config_req_resource=compute.RegionInstanceGroupManagerUpdateInstanceConfigReq(per_instance_configs=[compute.PerInstanceConfig(fingerprint='fingerprint_value')]), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionInstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionInstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstanceGroupManagersClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.RegionInstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstanceGroupManagersClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionInstanceGroupManagersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionInstanceGroupManagersClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionInstanceGroupManagersRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_instance_group_managers_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionInstanceGroupManagersTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_instance_group_managers_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_instance_group_managers.transports.RegionInstanceGroupManagersTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionInstanceGroupManagersTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'abandon_instances', + 'apply_updates_to_instances', + 'create_instances', + 'delete', + 'delete_instances', + 'delete_per_instance_configs', + 'get', + 'insert', + 'list', + 'list_errors', + 'list_managed_instances', + 'list_per_instance_configs', + 'patch', + 'patch_per_instance_configs', + 'recreate_instances', + 'resize', + 'set_instance_template', + 'set_target_pools', + 'update_per_instance_configs', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_instance_group_managers_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_instance_group_managers.transports.RegionInstanceGroupManagersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionInstanceGroupManagersTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_instance_group_managers_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_instance_group_managers.transports.RegionInstanceGroupManagersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionInstanceGroupManagersTransport() + adc.assert_called_once() + + +def test_region_instance_group_managers_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionInstanceGroupManagersClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_instance_group_managers_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionInstanceGroupManagersRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_instance_group_managers_host_no_port(): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_instance_group_managers_host_with_port(): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert 
client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionInstanceGroupManagersClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionInstanceGroupManagersClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionInstanceGroupManagersClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionInstanceGroupManagersClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionInstanceGroupManagersClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionInstanceGroupManagersClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionInstanceGroupManagersClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionInstanceGroupManagersClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionInstanceGroupManagersClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionInstanceGroupManagersClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionInstanceGroupManagersClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = RegionInstanceGroupManagersClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionInstanceGroupManagersClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionInstanceGroupManagersClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionInstanceGroupManagersClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionInstanceGroupManagersTransport, '_prep_wrapped_messages') as prep: + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionInstanceGroupManagersTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionInstanceGroupManagersClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionInstanceGroupManagersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instance_groups.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instance_groups.py new file mode 100644 index 000000000..a4038025d --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instance_groups.py @@ -0,0 +1,1281 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_instance_groups import RegionInstanceGroupsClient +from google.cloud.compute_v1.services.region_instance_groups import pagers +from google.cloud.compute_v1.services.region_instance_groups import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionInstanceGroupsClient._get_default_mtls_endpoint(None) is None + assert RegionInstanceGroupsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionInstanceGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionInstanceGroupsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionInstanceGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionInstanceGroupsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionInstanceGroupsClient, +]) +def test_region_instance_groups_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionInstanceGroupsRestTransport, "rest"), +]) +def test_region_instance_groups_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + 
transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionInstanceGroupsClient, +]) +def test_region_instance_groups_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_instance_groups_client_get_transport_class(): + transport = RegionInstanceGroupsClient.get_transport_class() + available_transports = [ + transports.RegionInstanceGroupsRestTransport, + ] + assert transport in available_transports + + transport = RegionInstanceGroupsClient.get_transport_class("rest") + assert transport == transports.RegionInstanceGroupsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstanceGroupsClient, transports.RegionInstanceGroupsRestTransport, "rest"), +]) +@mock.patch.object(RegionInstanceGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionInstanceGroupsClient)) +def test_region_instance_groups_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionInstanceGroupsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionInstanceGroupsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionInstanceGroupsClient, transports.RegionInstanceGroupsRestTransport, "rest", "true"), + (RegionInstanceGroupsClient, transports.RegionInstanceGroupsRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionInstanceGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionInstanceGroupsClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_instance_groups_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstanceGroupsClient, transports.RegionInstanceGroupsRestTransport, "rest"), +]) +def test_region_instance_groups_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstanceGroupsClient, transports.RegionInstanceGroupsRestTransport, "rest"), +]) +def test_region_instance_groups_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionInstanceGroupRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.InstanceGroup( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + network='network_value', + region='region_value', + self_link='self_link_value', + size=443, + subnetwork='subnetwork_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroup.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.InstanceGroup) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.size == 443 + assert response.subnetwork == 'subnetwork_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionInstanceGroupRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.InstanceGroup() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.InstanceGroup.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group='instance_group_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionInstanceGroupRequest(), + project='project_value', + region='region_value', + instance_group='instance_group_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionInstanceGroupsRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionInstanceGroupsRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.RegionInstanceGroupList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroups" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionInstanceGroupsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionInstanceGroupList( + items=[ + compute.InstanceGroup(), + compute.InstanceGroup(), + compute.InstanceGroup(), + ], + next_page_token='abc', + ), + compute.RegionInstanceGroupList( + items=[], + next_page_token='def', + ), + compute.RegionInstanceGroupList( + items=[ + compute.InstanceGroup(), + ], + next_page_token='ghi', + ), + compute.RegionInstanceGroupList( + items=[ + compute.InstanceGroup(), + compute.InstanceGroup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionInstanceGroupList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceGroup) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_instances_rest(transport: str = 'rest', request_type=compute.ListInstancesRegionInstanceGroupsRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + request_init["region_instance_groups_list_instances_request_resource"] = compute.RegionInstanceGroupsListInstancesRequest(instance_state='instance_state_value') + request = 
request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupsListInstances( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupsListInstances.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_instances(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListInstancesPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_instances_rest_bad_request(transport: str = 'rest', request_type=compute.ListInstancesRegionInstanceGroupsRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + request_init["region_instance_groups_list_instances_request_resource"] = compute.RegionInstanceGroupsListInstancesRequest(instance_state='instance_state_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_instances(request) + + +def test_list_instances_rest_from_dict(): + test_list_instances_rest(request_type=dict) + + +def test_list_instances_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionInstanceGroupsListInstances() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionInstanceGroupsListInstances.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group='instance_group_value', + region_instance_groups_list_instances_request_resource=compute.RegionInstanceGroupsListInstancesRequest(instance_state='instance_state_value'), + ) + mock_args.update(sample_request) + client.list_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}/listInstances" % client.transport._host, args[1]) + + +def test_list_instances_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_instances( + compute.ListInstancesRegionInstanceGroupsRequest(), + project='project_value', + region='region_value', + instance_group='instance_group_value', + region_instance_groups_list_instances_request_resource=compute.RegionInstanceGroupsListInstancesRequest(instance_state='instance_state_value'), + ) + + +def test_list_instances_rest_pager(): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionInstanceGroupsListInstances( + items=[ + compute.InstanceWithNamedPorts(), + compute.InstanceWithNamedPorts(), + compute.InstanceWithNamedPorts(), + ], + next_page_token='abc', + ), + compute.RegionInstanceGroupsListInstances( + items=[], + next_page_token='def', + ), + compute.RegionInstanceGroupsListInstances( + items=[ + compute.InstanceWithNamedPorts(), + ], + next_page_token='ghi', + ), + compute.RegionInstanceGroupsListInstances( + items=[ + compute.InstanceWithNamedPorts(), + compute.InstanceWithNamedPorts(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionInstanceGroupsListInstances.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + sample_request["region_instance_groups_list_instances_request_resource"] = compute.RegionInstanceGroupsListInstancesRequest(instance_state='instance_state_value') + + pager = client.list_instances(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.InstanceWithNamedPorts) + for i in results) + + pages = list(client.list_instances(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_named_ports_rest(transport: str = 'rest', request_type=compute.SetNamedPortsRegionInstanceGroupRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a 
request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + request_init["region_instance_groups_set_named_ports_request_resource"] = compute.RegionInstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_named_ports(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_named_ports_rest_bad_request(transport: str = 'rest', request_type=compute.SetNamedPortsRegionInstanceGroupRequest): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + request_init["region_instance_groups_set_named_ports_request_resource"] = compute.RegionInstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_named_ports(request) + + +def test_set_named_ports_rest_from_dict(): + test_set_named_ports_rest(request_type=dict) + + +def test_set_named_ports_rest_flattened(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "instance_group": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + instance_group='instance_group_value', + region_instance_groups_set_named_ports_request_resource=compute.RegionInstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_named_ports(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instanceGroups/{instance_group}/setNamedPorts" % client.transport._host, args[1]) + + +def test_set_named_ports_rest_flattened_error(transport: str = 'rest'): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_named_ports( + compute.SetNamedPortsRegionInstanceGroupRequest(), + project='project_value', + region='region_value', + instance_group='instance_group_value', + region_instance_groups_set_named_ports_request_resource=compute.RegionInstanceGroupsSetNamedPortsRequest(fingerprint='fingerprint_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionInstanceGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionInstanceGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstanceGroupsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.RegionInstanceGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstanceGroupsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionInstanceGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionInstanceGroupsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionInstanceGroupsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_instance_groups_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionInstanceGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_instance_groups_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_instance_groups.transports.RegionInstanceGroupsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionInstanceGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'get', + 'list', + 'list_instances', + 'set_named_ports', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_instance_groups_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_instance_groups.transports.RegionInstanceGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionInstanceGroupsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_instance_groups_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_instance_groups.transports.RegionInstanceGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionInstanceGroupsTransport() + adc.assert_called_once() + + +def test_region_instance_groups_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionInstanceGroupsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_instance_groups_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionInstanceGroupsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_instance_groups_host_no_port(): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_instance_groups_host_with_port(): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionInstanceGroupsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionInstanceGroupsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionInstanceGroupsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionInstanceGroupsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionInstanceGroupsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionInstanceGroupsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionInstanceGroupsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionInstanceGroupsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionInstanceGroupsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionInstanceGroupsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionInstanceGroupsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionInstanceGroupsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionInstanceGroupsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionInstanceGroupsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionInstanceGroupsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionInstanceGroupsTransport, '_prep_wrapped_messages') as prep: + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionInstanceGroupsTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionInstanceGroupsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionInstanceGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionInstanceGroupsClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instances.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instances.py new file mode 100644 index 000000000..87db7b975 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_instances.py @@ -0,0 +1,789 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_instances import RegionInstancesClient +from google.cloud.compute_v1.services.region_instances import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionInstancesClient._get_default_mtls_endpoint(None) is None + assert RegionInstancesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionInstancesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionInstancesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionInstancesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionInstancesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionInstancesClient, +]) +def test_region_instances_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionInstancesRestTransport, "rest"), +]) +def test_region_instances_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionInstancesClient, +]) +def test_region_instances_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_instances_client_get_transport_class(): + transport = RegionInstancesClient.get_transport_class() + available_transports = [ + transports.RegionInstancesRestTransport, + ] + assert transport in available_transports + + transport = RegionInstancesClient.get_transport_class("rest") + assert transport == transports.RegionInstancesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstancesClient, transports.RegionInstancesRestTransport, "rest"), +]) +@mock.patch.object(RegionInstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionInstancesClient)) +def test_region_instances_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionInstancesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionInstancesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionInstancesClient, transports.RegionInstancesRestTransport, "rest", "true"), + (RegionInstancesClient, transports.RegionInstancesRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionInstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionInstancesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_instances_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstancesClient, transports.RegionInstancesRestTransport, "rest"), +]) +def test_region_instances_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionInstancesClient, transports.RegionInstancesRestTransport, "rest"), +]) +def test_region_instances_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_bulk_insert_rest(transport: str = 'rest', request_type=compute.BulkInsertRegionInstanceRequest): + client = RegionInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["bulk_insert_instance_resource_resource"] = compute.BulkInsertInstanceResource(count=553) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.bulk_insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_bulk_insert_rest_bad_request(transport: str = 'rest', request_type=compute.BulkInsertRegionInstanceRequest): + client = RegionInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["bulk_insert_instance_resource_resource"] = compute.BulkInsertInstanceResource(count=553) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.bulk_insert(request) + + +def test_bulk_insert_rest_from_dict(): + test_bulk_insert_rest(request_type=dict) + + +def test_bulk_insert_rest_flattened(transport: str = 'rest'): + client = RegionInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + bulk_insert_instance_resource_resource=compute.BulkInsertInstanceResource(count=553), + ) + mock_args.update(sample_request) + client.bulk_insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/instances/bulkInsert" % client.transport._host, args[1]) + + +def test_bulk_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.bulk_insert( + compute.BulkInsertRegionInstanceRequest(), + project='project_value', + region='region_value', + bulk_insert_instance_resource_resource=compute.BulkInsertInstanceResource(count=553), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstancesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionInstancesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.RegionInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionInstancesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionInstancesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_instances_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionInstancesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_instances_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_instances.transports.RegionInstancesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionInstancesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'bulk_insert', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_instances_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_instances.transports.RegionInstancesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionInstancesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_instances_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_instances.transports.RegionInstancesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionInstancesTransport() + adc.assert_called_once() + + +def test_region_instances_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        RegionInstancesClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+            'https://www.googleapis.com/auth/compute',
+            'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id=None,
+        )
+
+
+def test_region_instances_http_transport_client_cert_source_for_mtls():
+    # A client_cert_source_for_mtls callback must be forwarded to the
+    # underlying AuthorizedSession's mTLS channel configuration.
+    cred = ga_credentials.AnonymousCredentials()
+    with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
+        transports.RegionInstancesRestTransport (
+            credentials=cred,
+            client_cert_source_for_mtls=client_cert_source_callback
+        )
+        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+def test_region_instances_host_no_port():
+    # An api_endpoint override without an explicit port gains the default ':443'.
+    client = RegionInstancesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:443'
+
+
+def test_region_instances_host_with_port():
+    # An explicit port in the override endpoint is preserved as-is.
+    client = RegionInstancesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:8000'
+
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+    actual = RegionInstancesClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = RegionInstancesClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionInstancesClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = RegionInstancesClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = RegionInstancesClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionInstancesClient.parse_common_folder_path(path)
+    assert expected == actual
+
+def test_common_organization_path():
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization, )
+    actual = RegionInstancesClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = RegionInstancesClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionInstancesClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = RegionInstancesClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = RegionInstancesClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionInstancesClient.parse_common_project_path(path)
+    assert expected == actual
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = RegionInstancesClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = RegionInstancesClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionInstancesClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+    # client_info must reach _prep_wrapped_messages whether the client or
+    # the transport is constructed directly.
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.RegionInstancesTransport, '_prep_wrapped_messages') as prep:
+        client = RegionInstancesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.RegionInstancesTransport, '_prep_wrapped_messages') as prep:
+        transport_class = RegionInstancesClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+
+def test_transport_close():
+    transports = {
+        "rest": "_session",
+    }
+
+    for transport, close_name in transports.items():
+        client = RegionInstancesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+def test_client_ctx():
+    transports = [
+        'rest',
+    ]
+    for transport in transports:
+        client = RegionInstancesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        # Test client
calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_network_endpoint_groups.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_network_endpoint_groups.py
new file mode 100644
index 000000000..79637565e
--- /dev/null
+++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_network_endpoint_groups.py
@@ -0,0 +1,1251 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.region_network_endpoint_groups import RegionNetworkEndpointGroupsClient
+from google.cloud.compute_v1.services.region_network_endpoint_groups import pagers
+from google.cloud.compute_v1.services.region_network_endpoint_groups import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+# Dummy client certificate source used by the mTLS tests below; returns a
+# fake (cert, key) pair of bytes.
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    # Only *.googleapis.com endpoints gain an 'mtls.' label; other hosts and
+    # None pass through unchanged.
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert RegionNetworkEndpointGroupsClient._get_default_mtls_endpoint(None) is None
+    assert RegionNetworkEndpointGroupsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert RegionNetworkEndpointGroupsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert RegionNetworkEndpointGroupsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert RegionNetworkEndpointGroupsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert RegionNetworkEndpointGroupsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+    RegionNetworkEndpointGroupsClient,
+])
+def test_region_network_endpoint_groups_client_from_service_account_info(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+    (transports.RegionNetworkEndpointGroupsRestTransport, "rest"),
+])
+def test_region_network_endpoint_groups_client_service_account_always_use_jwt(transport_class, transport_name):
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', 
create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=True)
+        use_jwt.assert_called_once_with(True)
+
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=False)
+        use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize("client_class", [
+    RegionNetworkEndpointGroupsClient,
+])
+def test_region_network_endpoint_groups_client_from_service_account_file(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
+        factory.return_value = creds
+        client = client_class.from_service_account_file("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        client = client_class.from_service_account_json("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+def test_region_network_endpoint_groups_client_get_transport_class():
+    transport = RegionNetworkEndpointGroupsClient.get_transport_class()
+    available_transports = [
+        transports.RegionNetworkEndpointGroupsRestTransport,
+    ]
+    assert transport in available_transports
+
+    transport = RegionNetworkEndpointGroupsClient.get_transport_class("rest")
+    assert transport == transports.RegionNetworkEndpointGroupsRestTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (RegionNetworkEndpointGroupsClient, transports.RegionNetworkEndpointGroupsRestTransport, "rest"),
+])
+@mock.patch.object(RegionNetworkEndpointGroupsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionNetworkEndpointGroupsClient))
+def 
test_region_network_endpoint_groups_client_client_options(client_class, transport_class, transport_name):
+    # Check that if channel is provided we won't create a new one.
+    with mock.patch.object(RegionNetworkEndpointGroupsClient, 'get_transport_class') as gtc:
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if channel is provided via str we will create a new one.
+    with mock.patch.object(RegionNetworkEndpointGroupsClient, 'get_transport_class') as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name)
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "always".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name)
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_MTLS_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+    # unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+        with pytest.raises(MutualTLSChannelError):
+            client = client_class()
+
+    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
+        with pytest.raises(ValueError):
+            client = client_class()
+
+    # Check the case quota_project_id is provided
+    options = client_options.ClientOptions(quota_project_id="octopus")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id="octopus",
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
+    (RegionNetworkEndpointGroupsClient, transports.RegionNetworkEndpointGroupsRestTransport, "rest", "true"),
+    (RegionNetworkEndpointGroupsClient, transports.RegionNetworkEndpointGroupsRestTransport, "rest", "false"),
+])
+@mock.patch.object(RegionNetworkEndpointGroupsClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(RegionNetworkEndpointGroupsClient))
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_region_network_endpoint_groups_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
+    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+    # Check the case client_cert_source is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name, client_options=options)
+
+            if use_client_cert_env == "false":
+                expected_client_cert_source = None
+                expected_host = client.DEFAULT_ENDPOINT
+            else:
+                expected_client_cert_source = client_cert_source_callback
+                expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=expected_host,
+                scopes=None,
+                client_cert_source_for_mtls=expected_client_cert_source,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case ADC client cert is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
+                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
+                    if use_client_cert_env == "false":
+                        expected_host = client.DEFAULT_ENDPOINT
+                        expected_client_cert_source = None
+                    else:
+                        expected_host = client.DEFAULT_MTLS_ENDPOINT
+                        expected_client_cert_source = client_cert_source_callback
+
+                    patched.return_value = None
+                    client = client_class(transport=transport_name)
+                    patched.assert_called_once_with(
+                        credentials=None,
+                        credentials_file=None,
+                        host=expected_host,
+                        scopes=None,
+                        client_cert_source_for_mtls=expected_client_cert_source,
+                        quota_project_id=None,
+                        client_info=transports.base.DEFAULT_CLIENT_INFO,
+                        always_use_jwt_access=True,
+                    )
+
+    # Check the case client_cert_source and ADC client cert are not provided.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
+                patched.return_value = None
+                client = client_class(transport=transport_name)
+                patched.assert_called_once_with(
+                    credentials=None,
+                    credentials_file=None,
+                    host=client.DEFAULT_ENDPOINT,
+                    scopes=None,
+                    client_cert_source_for_mtls=None,
+                    quota_project_id=None,
+                    client_info=transports.base.DEFAULT_CLIENT_INFO,
+                    always_use_jwt_access=True,
+                )
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (RegionNetworkEndpointGroupsClient, transports.RegionNetworkEndpointGroupsRestTransport, "rest"),
+])
+def test_region_network_endpoint_groups_client_client_options_scopes(client_class, transport_class, transport_name):
+    # Check the case scopes are provided.
+    options = client_options.ClientOptions(
+        scopes=["1", "2"],
+    )
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=["1", "2"],
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (RegionNetworkEndpointGroupsClient, transports.RegionNetworkEndpointGroupsRestTransport, "rest"),
+])
+def test_region_network_endpoint_groups_client_client_options_credentials_file(client_class, transport_class, transport_name):
+    # Check the case credentials file is provided.
+    options = client_options.ClientOptions(
+        credentials_file="credentials.json"
+    )
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file="credentials.json",
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+
+def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionNetworkEndpointGroupRequest):
+    # REST happy path: mocks the HTTP session and checks response field mapping.
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "network_endpoint_group": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.delete(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionNetworkEndpointGroupRequest):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "network_endpoint_group": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete(request)
+
+
+# Re-run the happy path with a plain dict request instead of the proto type.
+def test_delete_rest_from_dict():
+    test_delete_rest(request_type=dict)
+
+
+def test_delete_rest_flattened(transport: str = 'rest'):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "network_endpoint_group": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            network_endpoint_group='network_endpoint_group_value',
+        )
+        mock_args.update(sample_request)
+        client.delete(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups/{network_endpoint_group}" % client.transport._host, args[1])
+
+
+def test_delete_rest_flattened_error(transport: str = 'rest'):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete(
+            compute.DeleteRegionNetworkEndpointGroupRequest(),
+            project='project_value',
+            region='region_value',
+            network_endpoint_group='network_endpoint_group_value',
+        )
+
+
+def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionNetworkEndpointGroupRequest):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "network_endpoint_group": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.NetworkEndpointGroup(
+            creation_timestamp='creation_timestamp_value',
+            default_port=1289,
+            description='description_value',
+            id=205,
+            kind='kind_value',
+            name='name_value',
+            network='network_value',
+            network_endpoint_type='network_endpoint_type_value',
+            region='region_value',
+            self_link='self_link_value',
+            size=443,
+            subnetwork='subnetwork_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.NetworkEndpointGroup.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.get(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.NetworkEndpointGroup)
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.default_port == 1289
+    assert response.description == 'description_value'
+    assert response.id == 205
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.network == 'network_value'
+    assert response.network_endpoint_type == 'network_endpoint_type_value'
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.size == 443
+    assert response.subnetwork == 'subnetwork_value'
+    assert response.zone == 'zone_value'
+
+
+def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionNetworkEndpointGroupRequest):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "network_endpoint_group": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get(request)
+
+
+def test_get_rest_from_dict():
+    test_get_rest(request_type=dict)
+
+
+def test_get_rest_flattened(transport: str = 'rest'):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.NetworkEndpointGroup()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.NetworkEndpointGroup.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "network_endpoint_group": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            network_endpoint_group='network_endpoint_group_value',
+        )
+        mock_args.update(sample_request)
+        client.get(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups/{network_endpoint_group}" % client.transport._host, args[1])
+
+
+def test_get_rest_flattened_error(transport: str = 'rest'):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get(
+            compute.GetRegionNetworkEndpointGroupRequest(),
+            project='project_value',
+            region='region_value',
+            network_endpoint_group='network_endpoint_group_value',
+        )
+
+
+def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionNetworkEndpointGroupRequest):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request_init["network_endpoint_group_resource"] = compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'})
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.insert(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionNetworkEndpointGroupRequest):
+    client = RegionNetworkEndpointGroupsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request_init["network_endpoint_group_resource"] = compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'})
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + network_endpoint_group_resource=compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionNetworkEndpointGroupRequest(), + project='project_value', + region='region_value', + network_endpoint_group_resource=compute.NetworkEndpointGroup(annotations={'key_value': 'value_value'}), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionNetworkEndpointGroupsRequest): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NetworkEndpointGroupList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionNetworkEndpointGroupsRequest): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NetworkEndpointGroupList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NetworkEndpointGroupList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/networkEndpointGroups" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionNetworkEndpointGroupsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + ], + next_page_token='abc', + ), + compute.NetworkEndpointGroupList( + items=[], + next_page_token='def', + ), + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + ], + next_page_token='ghi', + ), + compute.NetworkEndpointGroupList( + items=[ + compute.NetworkEndpointGroup(), + compute.NetworkEndpointGroup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NetworkEndpointGroupList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NetworkEndpointGroup) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.RegionNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionNetworkEndpointGroupsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionNetworkEndpointGroupsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionNetworkEndpointGroupsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionNetworkEndpointGroupsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionNetworkEndpointGroupsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_network_endpoint_groups_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionNetworkEndpointGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_network_endpoint_groups_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.region_network_endpoint_groups.transports.RegionNetworkEndpointGroupsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionNetworkEndpointGroupsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_network_endpoint_groups_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_network_endpoint_groups.transports.RegionNetworkEndpointGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionNetworkEndpointGroupsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_network_endpoint_groups_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_network_endpoint_groups.transports.RegionNetworkEndpointGroupsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionNetworkEndpointGroupsTransport() + adc.assert_called_once() + + +def test_region_network_endpoint_groups_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionNetworkEndpointGroupsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_network_endpoint_groups_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionNetworkEndpointGroupsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_network_endpoint_groups_host_no_port(): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_network_endpoint_groups_host_with_port(): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert 
client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionNetworkEndpointGroupsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionNetworkEndpointGroupsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionNetworkEndpointGroupsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionNetworkEndpointGroupsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionNetworkEndpointGroupsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionNetworkEndpointGroupsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionNetworkEndpointGroupsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionNetworkEndpointGroupsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionNetworkEndpointGroupsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionNetworkEndpointGroupsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionNetworkEndpointGroupsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = RegionNetworkEndpointGroupsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionNetworkEndpointGroupsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionNetworkEndpointGroupsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionNetworkEndpointGroupsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionNetworkEndpointGroupsTransport, '_prep_wrapped_messages') as prep: + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionNetworkEndpointGroupsTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionNetworkEndpointGroupsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionNetworkEndpointGroupsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_notification_endpoints.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_notification_endpoints.py new file mode 100644 index 000000000..66150c2d8 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_notification_endpoints.py @@ -0,0 +1,1239 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_notification_endpoints import RegionNotificationEndpointsClient +from google.cloud.compute_v1.services.region_notification_endpoints import pagers +from google.cloud.compute_v1.services.region_notification_endpoints import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionNotificationEndpointsClient._get_default_mtls_endpoint(None) is None + assert RegionNotificationEndpointsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionNotificationEndpointsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionNotificationEndpointsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionNotificationEndpointsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionNotificationEndpointsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionNotificationEndpointsClient, +]) +def test_region_notification_endpoints_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionNotificationEndpointsRestTransport, "rest"), +]) +def test_region_notification_endpoints_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) 
as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionNotificationEndpointsClient, +]) +def test_region_notification_endpoints_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_notification_endpoints_client_get_transport_class(): + transport = RegionNotificationEndpointsClient.get_transport_class() + available_transports = [ + transports.RegionNotificationEndpointsRestTransport, + ] + assert transport in available_transports + + transport = RegionNotificationEndpointsClient.get_transport_class("rest") + assert transport == transports.RegionNotificationEndpointsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionNotificationEndpointsClient, transports.RegionNotificationEndpointsRestTransport, "rest"), +]) +@mock.patch.object(RegionNotificationEndpointsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionNotificationEndpointsClient)) +def 
test_region_notification_endpoints_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RegionNotificationEndpointsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionNotificationEndpointsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionNotificationEndpointsClient, transports.RegionNotificationEndpointsRestTransport, "rest", "true"), + (RegionNotificationEndpointsClient, transports.RegionNotificationEndpointsRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionNotificationEndpointsClient, "DEFAULT_ENDPOINT", 
modify_default_endpoint(RegionNotificationEndpointsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_notification_endpoints_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionNotificationEndpointsClient, transports.RegionNotificationEndpointsRestTransport, "rest"), +]) +def test_region_notification_endpoints_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionNotificationEndpointsClient, transports.RegionNotificationEndpointsRestTransport, "rest"), +]) +def test_region_notification_endpoints_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionNotificationEndpointRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "notification_endpoint": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionNotificationEndpointRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "notification_endpoint": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "notification_endpoint": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + notification_endpoint='notification_endpoint_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/notificationEndpoints/{notification_endpoint}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionNotificationEndpointRequest(), + project='project_value', + region='region_value', + notification_endpoint='notification_endpoint_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionNotificationEndpointRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "notification_endpoint": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NotificationEndpoint( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NotificationEndpoint.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.NotificationEndpoint) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionNotificationEndpointRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "notification_endpoint": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NotificationEndpoint() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NotificationEndpoint.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "notification_endpoint": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + notification_endpoint='notification_endpoint_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/notificationEndpoints/{notification_endpoint}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetRegionNotificationEndpointRequest(), + project='project_value', + region='region_value', + notification_endpoint='notification_endpoint_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionNotificationEndpointRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["notification_endpoint_resource"] = compute.NotificationEndpoint(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response 
is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionNotificationEndpointRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["notification_endpoint_resource"] = compute.NotificationEndpoint(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + notification_endpoint_resource=compute.NotificationEndpoint(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/notificationEndpoints" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionNotificationEndpointRequest(), + project='project_value', + region='region_value', + notification_endpoint_resource=compute.NotificationEndpoint(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionNotificationEndpointsRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.NotificationEndpointList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NotificationEndpointList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionNotificationEndpointsRequest): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.NotificationEndpointList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.NotificationEndpointList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/notificationEndpoints" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionNotificationEndpointsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.NotificationEndpointList( + items=[ + compute.NotificationEndpoint(), + compute.NotificationEndpoint(), + compute.NotificationEndpoint(), + ], + next_page_token='abc', + ), + compute.NotificationEndpointList( + items=[], + next_page_token='def', + ), + compute.NotificationEndpointList( + items=[ + compute.NotificationEndpoint(), + ], + next_page_token='ghi', + ), + compute.NotificationEndpointList( + items=[ + compute.NotificationEndpoint(), + compute.NotificationEndpoint(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.NotificationEndpointList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.NotificationEndpoint) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionNotificationEndpointsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.RegionNotificationEndpointsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionNotificationEndpointsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionNotificationEndpointsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionNotificationEndpointsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionNotificationEndpointsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionNotificationEndpointsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionNotificationEndpointsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_notification_endpoints_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionNotificationEndpointsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_notification_endpoints_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.region_notification_endpoints.transports.RegionNotificationEndpointsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionNotificationEndpointsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_notification_endpoints_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_notification_endpoints.transports.RegionNotificationEndpointsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionNotificationEndpointsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_notification_endpoints_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_notification_endpoints.transports.RegionNotificationEndpointsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionNotificationEndpointsTransport() + adc.assert_called_once() + + +def test_region_notification_endpoints_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionNotificationEndpointsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_notification_endpoints_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionNotificationEndpointsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_notification_endpoints_host_no_port(): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_notification_endpoints_host_with_port(): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert 
client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionNotificationEndpointsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionNotificationEndpointsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionNotificationEndpointsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionNotificationEndpointsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionNotificationEndpointsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionNotificationEndpointsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionNotificationEndpointsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionNotificationEndpointsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionNotificationEndpointsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionNotificationEndpointsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionNotificationEndpointsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = RegionNotificationEndpointsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionNotificationEndpointsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionNotificationEndpointsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionNotificationEndpointsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionNotificationEndpointsTransport, '_prep_wrapped_messages') as prep: + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionNotificationEndpointsTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionNotificationEndpointsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionNotificationEndpointsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_operations.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_operations.py new file mode 100644 index 000000000..b563c6737 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_operations.py @@ -0,0 +1,1223 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_operations import RegionOperationsClient +from google.cloud.compute_v1.services.region_operations import pagers +from google.cloud.compute_v1.services.region_operations import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionOperationsClient._get_default_mtls_endpoint(None) is None + assert RegionOperationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionOperationsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionOperationsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionOperationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionOperationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionOperationsClient, +]) +def test_region_operations_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionOperationsRestTransport, "rest"), +]) +def test_region_operations_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionOperationsClient, +]) +def test_region_operations_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_operations_client_get_transport_class(): + transport = RegionOperationsClient.get_transport_class() + available_transports = [ + transports.RegionOperationsRestTransport, + ] + assert transport in available_transports + + transport = RegionOperationsClient.get_transport_class("rest") + assert transport == transports.RegionOperationsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionOperationsClient, transports.RegionOperationsRestTransport, "rest"), +]) +@mock.patch.object(RegionOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionOperationsClient)) +def test_region_operations_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionOperationsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionOperationsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionOperationsClient, transports.RegionOperationsRestTransport, "rest", "true"), + (RegionOperationsClient, transports.RegionOperationsRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionOperationsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_operations_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionOperationsClient, transports.RegionOperationsRestTransport, "rest"), +]) +def test_region_operations_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionOperationsClient, transports.RegionOperationsRestTransport, "rest"), +]) +def test_region_operations_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionOperationRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DeleteRegionOperationResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteRegionOperationResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.DeleteRegionOperationResponse) + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionOperationRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DeleteRegionOperationResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteRegionOperationResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "operation": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/operations/{operation}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionOperationRequest(), + project='project_value', + region='region_value', + operation='operation_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionOperationRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionOperationRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "operation": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/operations/{operation}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionOperationRequest(), + project='project_value', + region='region_value', + operation='operation_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionOperationsRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.OperationList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionOperationsRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.OperationList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/operations" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionOperationsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + compute.Operation(), + ], + next_page_token='abc', + ), + compute.OperationList( + items=[], + next_page_token='def', + ), + compute.OperationList( + items=[ + compute.Operation(), + ], + next_page_token='ghi', + ), + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.OperationList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Operation) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_wait_rest(transport: str = 'rest', request_type=compute.WaitRegionOperationRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.wait(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_wait_rest_bad_request(transport: str = 'rest', request_type=compute.WaitRegionOperationRequest): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait(request) + + +def test_wait_rest_from_dict(): + test_wait_rest(request_type=dict) + + +def test_wait_rest_flattened(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "operation": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.wait(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/operations/{operation}/wait" % client.transport._host, args[1]) + + +def test_wait_rest_flattened_error(transport: str = 'rest'): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.wait( + compute.WaitRegionOperationRequest(), + project='project_value', + region='region_value', + operation='operation_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionOperationsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionOperationsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.RegionOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionOperationsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionOperationsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_operations_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_operations_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_operations.transports.RegionOperationsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'list', + 'wait', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_operations_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_operations.transports.RegionOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionOperationsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_operations_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_operations.transports.RegionOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionOperationsTransport() + adc.assert_called_once() + + +def test_region_operations_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        RegionOperationsClient()
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/compute',
+                'https://www.googleapis.com/auth/cloud-platform',
+            ),
+            quota_project_id=None,
+        )
+
+
+def test_region_operations_http_transport_client_cert_source_for_mtls():
+    cred = ga_credentials.AnonymousCredentials()
+    with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
+        transports.RegionOperationsRestTransport(
+            credentials=cred,
+            client_cert_source_for_mtls=client_cert_source_callback
+        )
+        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+def test_region_operations_host_no_port():
+    client = RegionOperationsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:443'
+
+
+def test_region_operations_host_with_port():
+    client = RegionOperationsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:8000'
+
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+    actual = RegionOperationsClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = RegionOperationsClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionOperationsClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    # "whelk"/"octopus"/etc. below are arbitrary placeholder values.
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = RegionOperationsClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = RegionOperationsClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionOperationsClient.parse_common_folder_path(path)
+    assert expected == actual
+
+def test_common_organization_path():
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization, )
+    actual = RegionOperationsClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = RegionOperationsClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionOperationsClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = RegionOperationsClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = RegionOperationsClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = RegionOperationsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionOperationsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionOperationsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionOperationsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionOperationsTransport, '_prep_wrapped_messages') as prep: + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionOperationsTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionOperationsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # 
Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_ssl_certificates.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_ssl_certificates.py new file mode 100644 index 000000000..a073d247e --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_ssl_certificates.py @@ -0,0 +1,1249 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_ssl_certificates import RegionSslCertificatesClient +from google.cloud.compute_v1.services.region_ssl_certificates import pagers +from google.cloud.compute_v1.services.region_ssl_certificates import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionSslCertificatesClient._get_default_mtls_endpoint(None) is None + assert RegionSslCertificatesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionSslCertificatesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionSslCertificatesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionSslCertificatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionSslCertificatesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionSslCertificatesClient, +]) +def test_region_ssl_certificates_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionSslCertificatesRestTransport, "rest"), +]) +def test_region_ssl_certificates_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, 
None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionSslCertificatesClient, +]) +def test_region_ssl_certificates_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_ssl_certificates_client_get_transport_class(): + transport = RegionSslCertificatesClient.get_transport_class() + available_transports = [ + transports.RegionSslCertificatesRestTransport, + ] + assert transport in available_transports + + transport = RegionSslCertificatesClient.get_transport_class("rest") + assert transport == transports.RegionSslCertificatesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionSslCertificatesClient, transports.RegionSslCertificatesRestTransport, "rest"), +]) +@mock.patch.object(RegionSslCertificatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionSslCertificatesClient)) +def test_region_ssl_certificates_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionSslCertificatesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionSslCertificatesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionSslCertificatesClient, transports.RegionSslCertificatesRestTransport, "rest", "true"), + (RegionSslCertificatesClient, transports.RegionSslCertificatesRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionSslCertificatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionSslCertificatesClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_ssl_certificates_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionSslCertificatesClient, transports.RegionSslCertificatesRestTransport, "rest"), +]) +def test_region_ssl_certificates_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionSslCertificatesClient, transports.RegionSslCertificatesRestTransport, "rest"), +]) +def test_region_ssl_certificates_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionSslCertificateRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "ssl_certificate": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionSslCertificateRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "ssl_certificate": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "ssl_certificate": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ssl_certificate='ssl_certificate_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/sslCertificates/{ssl_certificate}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionSslCertificateRequest(), + project='project_value', + region='region_value', + ssl_certificate='ssl_certificate_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionSslCertificateRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "ssl_certificate": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SslCertificate( + certificate='certificate_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + expire_time='expire_time_value', + id=205, + kind='kind_value', + name='name_value', + private_key='private_key_value', + region='region_value', + self_link='self_link_value', + subject_alternative_names=['subject_alternative_names_value'], + type_='type__value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificate.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.SslCertificate) + assert response.certificate == 'certificate_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.expire_time == 'expire_time_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.private_key == 'private_key_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.subject_alternative_names == ['subject_alternative_names_value'] + assert response.type_ == 'type__value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionSslCertificateRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "ssl_certificate": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslCertificate() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificate.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "ssl_certificate": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ssl_certificate='ssl_certificate_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/sslCertificates/{ssl_certificate}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionSslCertificateRequest(), + project='project_value', + region='region_value', + ssl_certificate='ssl_certificate_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionSslCertificateRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["ssl_certificate_resource"] = compute.SslCertificate(certificate='certificate_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionSslCertificateRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["ssl_certificate_resource"] = compute.SslCertificate(certificate='certificate_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ssl_certificate_resource=compute.SslCertificate(certificate='certificate_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/sslCertificates" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionSslCertificateRequest(), + project='project_value', + region='region_value', + ssl_certificate_resource=compute.SslCertificate(certificate='certificate_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionSslCertificatesRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslCertificateList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificateList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionSslCertificatesRequest): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SslCertificateList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificateList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/sslCertificates" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionSslCertificatesRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SslCertificateList( + items=[ + compute.SslCertificate(), + compute.SslCertificate(), + compute.SslCertificate(), + ], + next_page_token='abc', + ), + compute.SslCertificateList( + items=[], + next_page_token='def', + ), + compute.SslCertificateList( + items=[ + compute.SslCertificate(), + ], + next_page_token='ghi', + ), + compute.SslCertificateList( + items=[ + compute.SslCertificate(), + compute.SslCertificate(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SslCertificateList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.SslCertificate) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionSslCertificatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.RegionSslCertificatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionSslCertificatesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionSslCertificatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionSslCertificatesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionSslCertificatesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionSslCertificatesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionSslCertificatesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_ssl_certificates_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionSslCertificatesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_ssl_certificates_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.region_ssl_certificates.transports.RegionSslCertificatesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionSslCertificatesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_ssl_certificates_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_ssl_certificates.transports.RegionSslCertificatesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionSslCertificatesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_ssl_certificates_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_ssl_certificates.transports.RegionSslCertificatesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionSslCertificatesTransport() + adc.assert_called_once() + + +def test_region_ssl_certificates_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionSslCertificatesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_ssl_certificates_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionSslCertificatesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_ssl_certificates_host_no_port(): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_ssl_certificates_host_with_port(): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def 
test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionSslCertificatesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionSslCertificatesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionSslCertificatesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionSslCertificatesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionSslCertificatesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionSslCertificatesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionSslCertificatesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionSslCertificatesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionSslCertificatesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionSslCertificatesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionSslCertificatesClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = RegionSslCertificatesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionSslCertificatesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionSslCertificatesClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionSslCertificatesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionSslCertificatesTransport, '_prep_wrapped_messages') as prep: + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionSslCertificatesTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionSslCertificatesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionSslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_target_http_proxies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_target_http_proxies.py new file mode 100644 index 000000000..2b789d2ea --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_target_http_proxies.py @@ -0,0 +1,1401 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_target_http_proxies import RegionTargetHttpProxiesClient +from google.cloud.compute_v1.services.region_target_http_proxies import pagers +from google.cloud.compute_v1.services.region_target_http_proxies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client):
+    # Swap a localhost default endpoint for a distinct hostname so endpoint /
+    # mTLS-endpoint switching can be observed in the tests below.
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    # None and non-googleapis hosts pass through unchanged; googleapis hosts
+    # gain (or keep) the ".mtls." label.
+    assert RegionTargetHttpProxiesClient._get_default_mtls_endpoint(None) is None
+    assert RegionTargetHttpProxiesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert RegionTargetHttpProxiesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert RegionTargetHttpProxiesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert RegionTargetHttpProxiesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert RegionTargetHttpProxiesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+    RegionTargetHttpProxiesClient,
+])
+def test_region_target_http_proxies_client_from_service_account_info(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+    (transports.RegionTargetHttpProxiesRestTransport, "rest"),
+])
+def test_region_target_http_proxies_client_service_account_always_use_jwt(transport_class, transport_name):
+    # with_always_use_jwt_access is called iff always_use_jwt_access=True.
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=True)
+        use_jwt.assert_called_once_with(True)
+
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds, always_use_jwt_access=False)
+        use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize("client_class", [
+    RegionTargetHttpProxiesClient,
+])
+def test_region_target_http_proxies_client_from_service_account_file(client_class):
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
+        factory.return_value = creds
+        client = client_class.from_service_account_file("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        client = client_class.from_service_account_json("dummy/file/path.json")
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+def test_region_target_http_proxies_client_get_transport_class():
+    # REST is the only transport generated for this service.
+    transport = RegionTargetHttpProxiesClient.get_transport_class()
+    available_transports = [
+        transports.RegionTargetHttpProxiesRestTransport,
+    ]
+    assert transport in available_transports
+
+    transport = RegionTargetHttpProxiesClient.get_transport_class("rest")
+    assert transport == transports.RegionTargetHttpProxiesRestTransport
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (RegionTargetHttpProxiesClient, transports.RegionTargetHttpProxiesRestTransport, "rest"),
+])
+@mock.patch.object(RegionTargetHttpProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionTargetHttpProxiesClient))
+def test_region_target_http_proxies_client_client_options(client_class, transport_class, transport_name):
+    # Check that if channel is provided we won't create a new one.
+    with mock.patch.object(RegionTargetHttpProxiesClient, 'get_transport_class') as gtc:
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+        client = client_class(transport=transport)
+        gtc.assert_not_called()
+
+    # Check that if channel is provided via str we will create a new one.
+    with mock.patch.object(RegionTargetHttpProxiesClient, 'get_transport_class') as gtc:
+        client = client_class(transport=transport_name)
+        gtc.assert_called()
+
+    # Check the case api_endpoint is provided.
+    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host="squid.clam.whelk",
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "never".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name)
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+    # "always".
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name)
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=client.DEFAULT_MTLS_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+    # unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+        with pytest.raises(MutualTLSChannelError):
+            client = client_class()
+
+    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
+        with pytest.raises(ValueError):
+            client = client_class()
+
+    # Check the case quota_project_id is provided
+    options = client_options.ClientOptions(quota_project_id="octopus")
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id="octopus",
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+# The next test runs twice: once with client certificates enabled ("true")
+# and once disabled ("false"), per the use_client_cert_env parameter.
+@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [
+    (RegionTargetHttpProxiesClient, transports.RegionTargetHttpProxiesRestTransport, "rest", "true"),
+    (RegionTargetHttpProxiesClient, transports.RegionTargetHttpProxiesRestTransport, "rest", "false"),
+])
+@mock.patch.object(RegionTargetHttpProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionTargetHttpProxiesClient))
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_region_target_http_proxies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env):
+    # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+    # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+    # Check the case client_cert_source is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
+        with mock.patch.object(transport_class, '__init__') as patched:
+            patched.return_value = None
+            client = client_class(transport=transport_name, client_options=options)
+
+            if use_client_cert_env == "false":
+                expected_client_cert_source = None
+                expected_host = client.DEFAULT_ENDPOINT
+            else:
+                expected_client_cert_source = client_cert_source_callback
+                expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+            patched.assert_called_once_with(
+                credentials=None,
+                credentials_file=None,
+                host=expected_host,
+                scopes=None,
+                client_cert_source_for_mtls=expected_client_cert_source,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+            )
+
+    # Check the case ADC client cert is provided. Whether client cert is used depends on
+    # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True):
+                with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback):
+                    # NOTE(review): `client` here is still the instance bound in the
+                    # previous block; only its class-level DEFAULT_* endpoints are read,
+                    # so this works, but `client_class.DEFAULT_ENDPOINT` would be clearer.
+                    if use_client_cert_env == "false":
+                        expected_host = client.DEFAULT_ENDPOINT
+                        expected_client_cert_source = None
+                    else:
+                        expected_host = client.DEFAULT_MTLS_ENDPOINT
+                        expected_client_cert_source = client_cert_source_callback
+
+                    patched.return_value = None
+                    client = client_class(transport=transport_name)
+                    patched.assert_called_once_with(
+                        credentials=None,
+                        credentials_file=None,
+                        host=expected_host,
+                        scopes=None,
+                        client_cert_source_for_mtls=expected_client_cert_source,
+                        quota_project_id=None,
+                        client_info=transports.base.DEFAULT_CLIENT_INFO,
+                        always_use_jwt_access=True,
+                    )
+
+    # Check the case client_cert_source and ADC client cert are not provided.
+    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
+        with mock.patch.object(transport_class, '__init__') as patched:
+            with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False):
+                patched.return_value = None
+                client = client_class(transport=transport_name)
+                patched.assert_called_once_with(
+                    credentials=None,
+                    credentials_file=None,
+                    host=client.DEFAULT_ENDPOINT,
+                    scopes=None,
+                    client_cert_source_for_mtls=None,
+                    quota_project_id=None,
+                    client_info=transports.base.DEFAULT_CLIENT_INFO,
+                    always_use_jwt_access=True,
+                )
+
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (RegionTargetHttpProxiesClient, transports.RegionTargetHttpProxiesRestTransport, "rest"),
+])
+def test_region_target_http_proxies_client_client_options_scopes(client_class, transport_class, transport_name):
+    # Check the case scopes are provided.
+    options = client_options.ClientOptions(
+        scopes=["1", "2"],
+    )
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file=None,
+            host=client.DEFAULT_ENDPOINT,
+            scopes=["1", "2"],
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+@pytest.mark.parametrize("client_class,transport_class,transport_name", [
+    (RegionTargetHttpProxiesClient, transports.RegionTargetHttpProxiesRestTransport, "rest"),
+])
+def test_region_target_http_proxies_client_client_options_credentials_file(client_class, transport_class, transport_name):
+    # Check the case credentials file is provided.
+    options = client_options.ClientOptions(
+        credentials_file="credentials.json"
+    )
+    with mock.patch.object(transport_class, '__init__') as patched:
+        patched.return_value = None
+        client = client_class(transport=transport_name, client_options=options)
+        patched.assert_called_once_with(
+            credentials=None,
+            credentials_file="credentials.json",
+            host=client.DEFAULT_ENDPOINT,
+            scopes=None,
+            client_cert_source_for_mtls=None,
+            quota_project_id=None,
+            client_info=transports.base.DEFAULT_CLIENT_INFO,
+            always_use_jwt_access=True,
+        )
+
+
+# `request_type` is parametrized so test_delete_rest_from_dict can reuse this
+# body with a plain dict instead of the proto request class.
+def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        # Every scalar field gets a distinct sentinel so the assertions below
+        # verify round-tripping through the JSON wire format.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.delete(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+# An HTTP 400 from the mocked session must surface as core_exceptions.BadRequest.
+def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete(request)
+
+
+def test_delete_rest_from_dict():
+    # Re-run the main delete test with a plain dict as the request.
+    test_delete_rest(request_type=dict)
+
+
+def test_delete_rest_flattened(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            target_http_proxy='target_http_proxy_value',
+        )
+        mock_args.update(sample_request)
+        client.delete(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}" % client.transport._host, args[1])
+
+
+def test_delete_rest_flattened_error(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete(
+            compute.DeleteRegionTargetHttpProxyRequest(),
+            project='project_value',
+            region='region_value',
+            target_http_proxy='target_http_proxy_value',
+        )
+
+
+def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        # Distinct sentinel per field, verified after the JSON round-trip below.
+        return_value = compute.TargetHttpProxy(
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            fingerprint='fingerprint_value',
+            id=205,
+            kind='kind_value',
+            name='name_value',
+            proxy_bind=True,
+            region='region_value',
+            self_link='self_link_value',
+            url_map='url_map_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.TargetHttpProxy.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.get(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.TargetHttpProxy)
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.fingerprint == 'fingerprint_value'
+    assert response.id == 205
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.proxy_bind is True
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.url_map == 'url_map_value'
+
+
+def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get(request)
+
+
+def test_get_rest_from_dict():
+    # Re-run the main get test with a plain dict as the request.
+    test_get_rest(request_type=dict)
+
+
+def test_get_rest_flattened(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.TargetHttpProxy()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.TargetHttpProxy.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            target_http_proxy='target_http_proxy_value',
+        )
+        mock_args.update(sample_request)
+        client.get(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}" % client.transport._host, args[1])
+
+
+def test_get_rest_flattened_error(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get(
+            compute.GetRegionTargetHttpProxyRequest(),
+            project='project_value',
+            region='region_value',
+            target_http_proxy='target_http_proxy_value',
+        )
+
+
+def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    # Insert carries the resource in the request body, not the URL path.
+    request_init["target_http_proxy_resource"] = compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.insert(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request_init["target_http_proxy_resource"] = compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.insert(request)
+
+
+def test_insert_rest_from_dict():
+    # Re-run the main insert test with a plain dict as the request.
+    test_insert_rest(request_type=dict)
+
+
+def test_insert_rest_flattened(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            target_http_proxy_resource=compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value'),
+        )
+        mock_args.update(sample_request)
+        client.insert(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpProxies" % client.transport._host, args[1])
+
+
+def test_insert_rest_flattened_error(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.insert(
+            compute.InsertRegionTargetHttpProxyRequest(),
+            project='project_value',
+            region='region_value',
+            target_http_proxy_resource=compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value'),
+        )
+
+
+def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionTargetHttpProxiesRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.TargetHttpProxyList(
+            id='id_value',
+            kind='kind_value',
+            next_page_token='next_page_token_value',
+            self_link='self_link_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.TargetHttpProxyList.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.list(request)
+
+    # Establish that the response is the type that we expect.
+    # list() returns a pager; the pager forwards the raw response's attributes.
+    assert isinstance(response, pagers.ListPager)
+    assert response.id == 'id_value'
+    assert response.kind == 'kind_value'
+    assert response.next_page_token == 'next_page_token_value'
+    assert response.self_link == 'self_link_value'
+
+
+def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionTargetHttpProxiesRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.list(request)
+
+
+def test_list_rest_from_dict():
+    # Re-run the main list test with a plain dict as the request.
+    test_list_rest(request_type=dict)
+
+
+def test_list_rest_flattened(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.TargetHttpProxyList()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.TargetHttpProxyList.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+        )
+        mock_args.update(sample_request)
+        client.list(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpProxies" % client.transport._host, args[1])
+
+
+def test_list_rest_flattened_error(transport: str = 'rest'):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list(
+            compute.ListRegionTargetHttpProxiesRequest(),
+            project='project_value',
+            region='region_value',
+        )
+
+
+def test_list_rest_pager():
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # TODO(kbandes): remove this mock unless there's a good reason for it.
+        #with mock.patch.object(path_template, 'transcode') as transcode:
+        # Set the response as a series of pages
+        # Pages of 3 / 0 / 1 / 2 items; only the last page omits next_page_token.
+        response = (
+            compute.TargetHttpProxyList(
+                items=[
+                    compute.TargetHttpProxy(),
+                    compute.TargetHttpProxy(),
+                    compute.TargetHttpProxy(),
+                ],
+                next_page_token='abc',
+            ),
+            compute.TargetHttpProxyList(
+                items=[],
+                next_page_token='def',
+            ),
+            compute.TargetHttpProxyList(
+                items=[
+                    compute.TargetHttpProxy(),
+                ],
+                next_page_token='ghi',
+            ),
+            compute.TargetHttpProxyList(
+                items=[
+                    compute.TargetHttpProxy(),
+                ],
+            ),
+        )
+        # Two responses for two calls
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(compute.TargetHttpProxyList.to_json(x) for x in response)
+        return_values = tuple(Response() for i in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode('UTF-8')
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {"project": "sample1", "region": "sample2"}
+
+        pager = client.list(request=sample_request)
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, compute.TargetHttpProxy)
+                   for i in results)
+
+        pages = list(client.list(request=sample_request).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_set_url_map_rest(transport: str = 'rest', request_type=compute.SetUrlMapRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+    # setUrlMap carries a UrlMapReference in the request body.
+    request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+        response = client.set_url_map(request)
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, compute.Operation)
+    assert response.client_operation_id == 'client_operation_id_value'
+    assert response.creation_timestamp == 'creation_timestamp_value'
+    assert response.description == 'description_value'
+    assert response.end_time == 'end_time_value'
+    assert response.http_error_message == 'http_error_message_value'
+    assert response.http_error_status_code == 2374
+    assert response.id == 205
+    assert response.insert_time == 'insert_time_value'
+    assert response.kind == 'kind_value'
+    assert response.name == 'name_value'
+    assert response.operation_group_id == 'operation_group_id_value'
+    assert response.operation_type == 'operation_type_value'
+    assert response.progress == 885
+    assert response.region == 'region_value'
+    assert response.self_link == 'self_link_value'
+    assert response.start_time == 'start_time_value'
+    assert response.status == compute.Operation.Status.DONE
+    assert response.status_message == 'status_message_value'
+    assert response.target_id == 947
+    assert response.target_link == 'target_link_value'
+    assert response.user == 'user_value'
+    assert response.zone == 'zone_value'
+
+
+def test_set_url_map_rest_bad_request(transport: str = 'rest', request_type=compute.SetUrlMapRegionTargetHttpProxyRequest):
+    client = RegionTargetHttpProxiesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"}
+    request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value')
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_url_map(request) + + +def test_set_url_map_rest_from_dict(): + test_set_url_map_rest(request_type=dict) + + +def test_set_url_map_rest_flattened(transport: str = 'rest'): + client = RegionTargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_http_proxy": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_http_proxy='target_http_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + mock_args.update(sample_request) + client.set_url_map(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpProxies/{target_http_proxy}/setUrlMap" % client.transport._host, args[1]) + + +def test_set_url_map_rest_flattened_error(transport: str = 'rest'): + client = RegionTargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_url_map( + compute.SetUrlMapRegionTargetHttpProxyRequest(), + project='project_value', + region='region_value', + target_http_proxy='target_http_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionTargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionTargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionTargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionTargetHttpProxiesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+ transport = transports.RegionTargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionTargetHttpProxiesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionTargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionTargetHttpProxiesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionTargetHttpProxiesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_target_http_proxies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionTargetHttpProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_target_http_proxies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.region_target_http_proxies.transports.RegionTargetHttpProxiesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionTargetHttpProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'set_url_map', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_target_http_proxies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_target_http_proxies.transports.RegionTargetHttpProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionTargetHttpProxiesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_target_http_proxies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_target_http_proxies.transports.RegionTargetHttpProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionTargetHttpProxiesTransport() + adc.assert_called_once() + + +def test_region_target_http_proxies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionTargetHttpProxiesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_target_http_proxies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionTargetHttpProxiesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_target_http_proxies_host_no_port(): + client = RegionTargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_target_http_proxies_host_with_port(): + client = RegionTargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionTargetHttpProxiesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionTargetHttpProxiesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionTargetHttpProxiesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionTargetHttpProxiesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionTargetHttpProxiesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionTargetHttpProxiesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionTargetHttpProxiesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionTargetHttpProxiesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionTargetHttpProxiesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionTargetHttpProxiesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionTargetHttpProxiesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionTargetHttpProxiesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionTargetHttpProxiesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionTargetHttpProxiesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionTargetHttpProxiesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionTargetHttpProxiesTransport, '_prep_wrapped_messages') as prep: + client = RegionTargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionTargetHttpProxiesTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionTargetHttpProxiesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionTargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionTargetHttpProxiesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_target_https_proxies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_target_https_proxies.py new file mode 100644 index 000000000..6bef8bd51 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_target_https_proxies.py @@ -0,0 +1,1567 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_target_https_proxies import RegionTargetHttpsProxiesClient +from google.cloud.compute_v1.services.region_target_https_proxies import pagers +from google.cloud.compute_v1.services.region_target_https_proxies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionTargetHttpsProxiesClient._get_default_mtls_endpoint(None) is None + assert RegionTargetHttpsProxiesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionTargetHttpsProxiesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionTargetHttpsProxiesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionTargetHttpsProxiesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionTargetHttpsProxiesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionTargetHttpsProxiesClient, +]) +def test_region_target_https_proxies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionTargetHttpsProxiesRestTransport, "rest"), +]) +def test_region_target_https_proxies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = 
service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionTargetHttpsProxiesClient, +]) +def test_region_target_https_proxies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_target_https_proxies_client_get_transport_class(): + transport = RegionTargetHttpsProxiesClient.get_transport_class() + available_transports = [ + transports.RegionTargetHttpsProxiesRestTransport, + ] + assert transport in available_transports + + transport = RegionTargetHttpsProxiesClient.get_transport_class("rest") + assert transport == transports.RegionTargetHttpsProxiesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionTargetHttpsProxiesClient, transports.RegionTargetHttpsProxiesRestTransport, "rest"), +]) +@mock.patch.object(RegionTargetHttpsProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionTargetHttpsProxiesClient)) +def test_region_target_https_proxies_client_client_options(client_class, transport_class, 
transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RegionTargetHttpsProxiesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionTargetHttpsProxiesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionTargetHttpsProxiesClient, transports.RegionTargetHttpsProxiesRestTransport, "rest", "true"), + (RegionTargetHttpsProxiesClient, transports.RegionTargetHttpsProxiesRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionTargetHttpsProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionTargetHttpsProxiesClient)) 
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_region_target_https_proxies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionTargetHttpsProxiesClient, transports.RegionTargetHttpsProxiesRestTransport, "rest"), +]) +def test_region_target_https_proxies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionTargetHttpsProxiesClient, transports.RegionTargetHttpsProxiesRestTransport, "rest"), +]) +def test_region_target_https_proxies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. 
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionTargetHttpsProxyRequest(), + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpsProxy( + authorization_policy='authorization_policy_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + proxy_bind=True, + quic_override='quic_override_value', + region='region_value', + self_link='self_link_value', + server_tls_policy='server_tls_policy_value', + ssl_certificates=['ssl_certificates_value'], + ssl_policy='ssl_policy_value', + url_map='url_map_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TargetHttpsProxy) + assert response.authorization_policy == 'authorization_policy_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.proxy_bind is True + assert response.quic_override == 'quic_override_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.server_tls_policy == 'server_tls_policy_value' + assert response.ssl_certificates == ['ssl_certificates_value'] + assert response.ssl_policy == 'ssl_policy_value' + assert response.url_map == 'url_map_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that 
will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpsProxy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionTargetHttpsProxyRequest(), + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["target_https_proxy_resource"] = compute.TargetHttpsProxy(authorization_policy='authorization_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["target_https_proxy_resource"] = compute.TargetHttpsProxy(authorization_policy='authorization_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_https_proxy_resource=compute.TargetHttpsProxy(authorization_policy='authorization_policy_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionTargetHttpsProxyRequest(), + project='project_value', + region='region_value', + target_https_proxy_resource=compute.TargetHttpsProxy(authorization_policy='authorization_policy_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionTargetHttpsProxiesRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpsProxyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionTargetHttpsProxiesRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpsProxyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionTargetHttpsProxiesRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetHttpsProxyList( + items=[ + compute.TargetHttpsProxy(), + compute.TargetHttpsProxy(), + compute.TargetHttpsProxy(), + ], + next_page_token='abc', + ), + compute.TargetHttpsProxyList( + items=[], + next_page_token='def', + ), + compute.TargetHttpsProxyList( + items=[ + compute.TargetHttpsProxy(), + ], + next_page_token='ghi', + ), + compute.TargetHttpsProxyList( + items=[ + compute.TargetHttpsProxy(), + compute.TargetHttpsProxy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetHttpsProxyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetHttpsProxy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_ssl_certificates_rest(transport: str = 'rest', request_type=compute.SetSslCertificatesRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request_init["region_target_https_proxies_set_ssl_certificates_request_resource"] = 
compute.RegionTargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_ssl_certificates(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_ssl_certificates_rest_bad_request(transport: str = 'rest', request_type=compute.SetSslCertificatesRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request_init["region_target_https_proxies_set_ssl_certificates_request_resource"] = compute.RegionTargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_ssl_certificates(request) + + +def test_set_ssl_certificates_rest_from_dict(): + test_set_ssl_certificates_rest(request_type=dict) + + +def test_set_ssl_certificates_rest_flattened(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + region_target_https_proxies_set_ssl_certificates_request_resource=compute.RegionTargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']), + ) + mock_args.update(sample_request) + client.set_ssl_certificates(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}/setSslCertificates" % client.transport._host, args[1]) + + +def test_set_ssl_certificates_rest_flattened_error(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_ssl_certificates( + compute.SetSslCertificatesRegionTargetHttpsProxyRequest(), + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + region_target_https_proxies_set_ssl_certificates_request_resource=compute.RegionTargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']), + ) + + +def test_set_url_map_rest(transport: str = 'rest', request_type=compute.SetUrlMapRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_url_map(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_url_map_rest_bad_request(transport: str = 'rest', request_type=compute.SetUrlMapRegionTargetHttpsProxyRequest): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_url_map(request) + + +def test_set_url_map_rest_from_dict(): + test_set_url_map_rest(request_type=dict) + + +def test_set_url_map_rest_flattened(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_https_proxy": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + mock_args.update(sample_request) + client.set_url_map(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetHttpsProxies/{target_https_proxy}/setUrlMap" % client.transport._host, args[1]) + + +def test_set_url_map_rest_flattened_error(transport: str = 'rest'): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_url_map( + compute.SetUrlMapRegionTargetHttpsProxyRequest(), + project='project_value', + region='region_value', + target_https_proxy='target_https_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RegionTargetHttpsProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionTargetHttpsProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionTargetHttpsProxiesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. 
+    transport = transports.RegionTargetHttpsProxiesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = RegionTargetHttpsProxiesClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    # The client must adopt that exact instance rather than building its own.
+    transport = transports.RegionTargetHttpsProxiesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = RegionTargetHttpsProxiesClient(transport=transport)
+    assert client.transport is transport
+
+
+@pytest.mark.parametrize("transport_class", [
+    transports.RegionTargetHttpsProxiesRestTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    # Constructing the transport with no credentials must fall back to
+    # google.auth.default() (Application Default Credentials) exactly once.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+
+def test_region_target_https_proxies_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    # (the two are mutually exclusive ways of supplying credentials).
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.RegionTargetHttpsProxiesTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_region_target_https_proxies_base_transport():
+    # Instantiate the base transport.
+    # __init__ is patched out so the abstract base can be constructed directly.
+    with mock.patch('google.cloud.compute_v1.services.region_target_https_proxies.transports.RegionTargetHttpsProxiesTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.RegionTargetHttpsProxiesTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'set_ssl_certificates', + 'set_url_map', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_target_https_proxies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_target_https_proxies.transports.RegionTargetHttpsProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionTargetHttpsProxiesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_target_https_proxies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_target_https_proxies.transports.RegionTargetHttpsProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionTargetHttpsProxiesTransport() + adc.assert_called_once() + + +def test_region_target_https_proxies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionTargetHttpsProxiesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_target_https_proxies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionTargetHttpsProxiesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_target_https_proxies_host_no_port(): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_target_https_proxies_host_with_port(): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionTargetHttpsProxiesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionTargetHttpsProxiesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+    actual = RegionTargetHttpsProxiesClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    # Building the canonical "folders/{folder}" resource path from a folder id.
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = RegionTargetHttpsProxiesClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = RegionTargetHttpsProxiesClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionTargetHttpsProxiesClient.parse_common_folder_path(path)
+    assert expected == actual
+
+def test_common_organization_path():
+    # Building the canonical "organizations/{organization}" resource path.
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization, )
+    actual = RegionTargetHttpsProxiesClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = RegionTargetHttpsProxiesClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = RegionTargetHttpsProxiesClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    # Building the canonical "projects/{project}" resource path.
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = RegionTargetHttpsProxiesClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = RegionTargetHttpsProxiesClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = RegionTargetHttpsProxiesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionTargetHttpsProxiesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionTargetHttpsProxiesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionTargetHttpsProxiesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionTargetHttpsProxiesTransport, '_prep_wrapped_messages') as prep: + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionTargetHttpsProxiesTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionTargetHttpsProxiesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionTargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionTargetHttpsProxiesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_url_maps.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_url_maps.py new file mode 100644 index 000000000..dcfcd178f --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_region_url_maps.py @@ -0,0 +1,1667 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.region_url_maps import RegionUrlMapsClient +from google.cloud.compute_v1.services.region_url_maps import pagers +from google.cloud.compute_v1.services.region_url_maps import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client):
+    # If the client's default endpoint is localhost, substitute a fake
+    # *.googleapis.com host so the mTLS endpoint derivation can be exercised;
+    # otherwise keep the real default endpoint unchanged.
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    # _get_default_mtls_endpoint must map *.googleapis.com hosts (plain or
+    # sandbox) to their *.mtls.* counterpart, pass through hosts that are
+    # already mTLS, leave non-Google hosts untouched, and return None for None.
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert RegionUrlMapsClient._get_default_mtls_endpoint(None) is None
+    assert RegionUrlMapsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert RegionUrlMapsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert RegionUrlMapsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert RegionUrlMapsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert RegionUrlMapsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+    RegionUrlMapsClient,
+])
+def test_region_url_maps_client_from_service_account_info(client_class):
+    # from_service_account_info must build its credentials via the mocked
+    # service_account factory and wire them into the transport unchanged.
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+    (transports.RegionUrlMapsRestTransport, "rest"),
+])
+def test_region_url_maps_client_service_account_always_use_jwt(transport_class, transport_name):
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport = transport_class(credentials=creds,
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionUrlMapsClient, +]) +def test_region_url_maps_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_url_maps_client_get_transport_class(): + transport = RegionUrlMapsClient.get_transport_class() + available_transports = [ + transports.RegionUrlMapsRestTransport, + ] + assert transport in available_transports + + transport = RegionUrlMapsClient.get_transport_class("rest") + assert transport == transports.RegionUrlMapsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest"), +]) +@mock.patch.object(RegionUrlMapsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionUrlMapsClient)) +def test_region_url_maps_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(RegionUrlMapsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(RegionUrlMapsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest", "true"), + (RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionUrlMapsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionUrlMapsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": 
"auto"}) +def test_region_url_maps_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest"), +]) +def test_region_url_maps_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionUrlMapsClient, transports.RegionUrlMapsRestTransport, "rest"), +]) +def test_region_url_maps_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        # A 400 from the HTTP layer must surface as core_exceptions.BadRequest.
+        client.delete(request)
+
+
+def test_delete_rest_from_dict():
+    # The same flow must accept a plain dict request (request_type=dict).
+    test_delete_rest(request_type=dict)
+
+
+def test_delete_rest_flattened(transport: str = 'rest'):
+    client = RegionUrlMapsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            url_map='url_map_value',
+        )
+        mock_args.update(sample_request)
+        client.delete(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteRegionUrlMapRequest(), + project='project_value', + region='region_value', + url_map='url_map_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.UrlMap( + creation_timestamp='creation_timestamp_value', + default_service='default_service_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMap.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.UrlMap) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.default_service == 'default_service_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.UrlMap() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMap.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + url_map='url_map_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRegionUrlMapRequest(), + project='project_value', + region='region_value', + url_map='url_map_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/urlMaps" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRegionUrlMapRequest(), + project='project_value', + region='region_value', + url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionUrlMapsRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.UrlMapList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMapList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionUrlMapsRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.UrlMapList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMapList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/urlMaps" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRegionUrlMapsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
        # NOTE(review): the transcode mock that the TODO above refers to was
        # never enabled; the pager is exercised end-to-end through the mocked
        # Session instead, so the commented-out patch has been dropped.
        # Set the response as a series of pages
        response = (
            compute.UrlMapList(
                items=[
                    compute.UrlMap(),
                    compute.UrlMap(),
                    compute.UrlMap(),
                ],
                next_page_token='abc',
            ),
            compute.UrlMapList(
                items=[],
                next_page_token='def',
            ),
            compute.UrlMapList(
                items=[
                    compute.UrlMap(),
                ],
                next_page_token='ghi',
            ),
            compute.UrlMapList(
                items=[
                    compute.UrlMap(),
                    compute.UrlMap(),
                ],
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.UrlMapList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1", "region": "sample2"}

        pager = client.list(request=sample_request)

        # 3 + 0 + 1 + 2 items spread across the four pages defined above.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.UrlMap)
                   for i in results)

        # The final page carries no token, hence the trailing '' sentinel.
        pages = list(client.list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_patch_rest(transport: str = 'rest', request_type=compute.PatchRegionUrlMapRequest):
    """patch() should round-trip a PatchRegionUrlMapRequest and return a compute.Operation."""
    client = RegionUrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"}
    request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + url_map='url_map_value', + url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchRegionUrlMapRequest(), + project='project_value', + region='region_value', + url_map='url_map_value', + url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + url_map='url_map_value', + url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateRegionUrlMapRequest(), + project='project_value', + region='region_value', + url_map='url_map_value', + url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'), + ) + + +def test_validate_rest(transport: str = 'rest', request_type=compute.ValidateRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request_init["region_url_maps_validate_request_resource"] = compute.RegionUrlMapsValidateRequest(resource=compute.UrlMap(creation_timestamp='creation_timestamp_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.UrlMapsValidateResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMapsValidateResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.validate(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.UrlMapsValidateResponse) + + +def test_validate_rest_bad_request(transport: str = 'rest', request_type=compute.ValidateRegionUrlMapRequest): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + request_init["region_url_maps_validate_request_resource"] = compute.RegionUrlMapsValidateRequest(resource=compute.UrlMap(creation_timestamp='creation_timestamp_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.validate(request) + + +def test_validate_rest_from_dict(): + test_validate_rest(request_type=dict) + + +def test_validate_rest_flattened(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.UrlMapsValidateResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMapsValidateResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "url_map": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + url_map='url_map_value', + region_url_maps_validate_request_resource=compute.RegionUrlMapsValidateRequest(resource=compute.UrlMap(creation_timestamp='creation_timestamp_value')), + ) + mock_args.update(sample_request) + client.validate(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/urlMaps/{url_map}/validate" % client.transport._host, args[1]) + + +def test_validate_rest_flattened_error(transport: str = 'rest'): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.validate( + compute.ValidateRegionUrlMapRequest(), + project='project_value', + region='region_value', + url_map='url_map_value', + region_url_maps_validate_request_resource=compute.RegionUrlMapsValidateRequest(resource=compute.UrlMap(creation_timestamp='creation_timestamp_value')), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.RegionUrlMapsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionUrlMapsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionUrlMapsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionUrlMapsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionUrlMapsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionUrlMapsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionUrlMapsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionUrlMapsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_region_url_maps_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionUrlMapsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_region_url_maps_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.region_url_maps.transports.RegionUrlMapsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionUrlMapsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'update', + 'validate', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_region_url_maps_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.region_url_maps.transports.RegionUrlMapsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionUrlMapsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_region_url_maps_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.region_url_maps.transports.RegionUrlMapsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionUrlMapsTransport() + adc.assert_called_once() + + +def test_region_url_maps_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionUrlMapsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_region_url_maps_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionUrlMapsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_region_url_maps_host_no_port(): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_region_url_maps_host_with_port(): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionUrlMapsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionUrlMapsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = RegionUrlMapsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionUrlMapsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionUrlMapsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionUrlMapsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionUrlMapsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionUrlMapsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionUrlMapsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionUrlMapsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionUrlMapsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionUrlMapsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionUrlMapsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionUrlMapsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionUrlMapsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionUrlMapsTransport, '_prep_wrapped_messages') as prep: + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionUrlMapsTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionUrlMapsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionUrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying 
transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_regions.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_regions.py new file mode 100644 index 000000000..e15245b8c --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_regions.py @@ -0,0 +1,935 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.regions import RegionsClient +from google.cloud.compute_v1.services.regions import pagers +from google.cloud.compute_v1.services.regions import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RegionsClient._get_default_mtls_endpoint(None) is None + assert RegionsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RegionsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RegionsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RegionsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RegionsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RegionsClient, +]) +def test_regions_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RegionsRestTransport, "rest"), +]) +def test_regions_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RegionsClient, +]) +def test_regions_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_regions_client_get_transport_class(): + transport = RegionsClient.get_transport_class() + available_transports = [ + transports.RegionsRestTransport, + ] + assert transport in available_transports + + transport = RegionsClient.get_transport_class("rest") + assert transport == transports.RegionsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionsClient, transports.RegionsRestTransport, "rest"), +]) +@mock.patch.object(RegionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionsClient)) +def test_regions_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RegionsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(RegionsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RegionsClient, transports.RegionsRestTransport, "rest", "true"), + (RegionsClient, transports.RegionsRestTransport, "rest", "false"), +]) +@mock.patch.object(RegionsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RegionsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_regions_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionsClient, transports.RegionsRestTransport, "rest"), +]) +def test_regions_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RegionsClient, transports.RegionsRestTransport, "rest"), +]) +def test_regions_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRegionRequest): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Region( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + self_link='self_link_value', + status='status_value', + supports_pzs=True, + zones=['zones_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Region.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Region) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.supports_pzs is True + assert response.zones == ['zones_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRegionRequest): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Region() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Region.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetRegionRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRegionsRequest): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRegionsRequest): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RegionList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RegionList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListRegionsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RegionList( + items=[ + compute.Region(), + compute.Region(), + compute.Region(), + ], + next_page_token='abc', + ), + compute.RegionList( + items=[], + next_page_token='def', + ), + compute.RegionList( + items=[ + compute.Region(), + ], + next_page_token='ghi', + ), + compute.RegionList( + items=[ + compute.Region(), + compute.Region(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RegionList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Region) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.RegionsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RegionsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RegionsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RegionsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RegionsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RegionsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RegionsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_regions_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RegionsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_regions_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.regions.transports.RegionsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RegionsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_regions_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.regions.transports.RegionsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_regions_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.regions.transports.RegionsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RegionsTransport() + adc.assert_called_once() + + +def test_regions_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RegionsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_regions_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RegionsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_regions_host_no_port(): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_regions_host_with_port(): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RegionsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RegionsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RegionsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RegionsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RegionsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RegionsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RegionsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RegionsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RegionsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RegionsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RegionsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RegionsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RegionsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RegionsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RegionsTransport, '_prep_wrapped_messages') as prep: + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RegionsTransport, '_prep_wrapped_messages') as prep: + transport_class = RegionsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RegionsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_reservations.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_reservations.py new file mode 100644 index 000000000..ade7e85e7 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_reservations.py @@ -0,0 +1,1934 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.reservations import ReservationsClient +from google.cloud.compute_v1.services.reservations import pagers +from google.cloud.compute_v1.services.reservations import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ReservationsClient._get_default_mtls_endpoint(None) is None + assert ReservationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ReservationsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ReservationsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ReservationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ReservationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ReservationsClient, +]) +def test_reservations_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ReservationsRestTransport, "rest"), +]) +def test_reservations_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ReservationsClient, +]) +def test_reservations_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_reservations_client_get_transport_class(): + transport = ReservationsClient.get_transport_class() + available_transports = [ + transports.ReservationsRestTransport, + ] + assert transport in available_transports + + transport = ReservationsClient.get_transport_class("rest") + assert transport == transports.ReservationsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ReservationsClient, transports.ReservationsRestTransport, "rest"), +]) +@mock.patch.object(ReservationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ReservationsClient)) +def test_reservations_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ReservationsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ReservationsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ReservationsClient, transports.ReservationsRestTransport, "rest", "true"), + (ReservationsClient, transports.ReservationsRestTransport, "rest", "false"), +]) +@mock.patch.object(ReservationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ReservationsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) 
+def test_reservations_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ReservationsClient, transports.ReservationsRestTransport, "rest"), +]) +def test_reservations_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ReservationsClient, transports.ReservationsRestTransport, "rest"), +]) +def test_reservations_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListReservationsRequest): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ReservationAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ReservationAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListReservationsRequest): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ReservationAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ReservationAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/reservations" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListReservationsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ReservationAggregatedList( + items={ + 'a':compute.ReservationsScopedList(), + 'b':compute.ReservationsScopedList(), + 'c':compute.ReservationsScopedList(), + }, + next_page_token='abc', + ), + compute.ReservationAggregatedList( + items={}, + next_page_token='def', + ), + compute.ReservationAggregatedList( + items={ + 'g':compute.ReservationsScopedList(), + }, + next_page_token='ghi', + ), + compute.ReservationAggregatedList( + items={ + 'h':compute.ReservationsScopedList(), + 'i':compute.ReservationsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ReservationAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.ReservationsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.ReservationsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.ReservationsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteReservationRequest): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "reservation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
    # (tail of test_delete_rest) Establish that the response is the type we
    # expect: every Operation field deserialized from the mocked JSON body.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteReservationRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.delete(request)


def test_delete_rest_from_dict():
    """delete also accepts a plain dict as the request object."""
    test_delete_rest(request_type=dict)


def test_delete_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the expected delete URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            reservation='reservation_value',
        )
        mock_args.update(sample_request)
        client.delete(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations/{reservation}" % client.transport._host, args[1])


def test_delete_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete(
            compute.DeleteReservationRequest(),
            project='project_value',
            zone='zone_value',
            reservation='reservation_value',
        )


def test_get_rest(transport: str = 'rest', request_type=compute.GetReservationRequest):
    """get returns a compute.Reservation deserialized from the REST JSON body."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Reservation(
            commitment='commitment_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            id=205,
            kind='kind_value',
            name='name_value',
            satisfies_pzs=True,
            self_link='self_link_value',
            specific_reservation_required=True,
            status='status_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Reservation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get(request)

    # Establish that the response is the type that we expect.
    # (tail of test_get_rest) every Reservation field round-trips through JSON.
    assert isinstance(response, compute.Reservation)
    assert response.commitment == 'commitment_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.id == 205
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.satisfies_pzs is True
    assert response.self_link == 'self_link_value'
    assert response.specific_reservation_required is True
    assert response.status == 'status_value'
    assert response.zone == 'zone_value'


def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetReservationRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)


def test_get_rest_from_dict():
    """get also accepts a plain dict as the request object."""
    test_get_rest(request_type=dict)


def test_get_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the expected get URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Reservation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Reservation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            reservation='reservation_value',
        )
        mock_args.update(sample_request)
        client.get(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations/{reservation}" % client.transport._host, args[1])


def test_get_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get(
            compute.GetReservationRequest(),
            project='project_value',
            zone='zone_value',
            reservation='reservation_value',
        )


def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyReservationRequest):
    """get_iam_policy returns a compute.Policy deserialized from the JSON body."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy(
            etag='etag_value',
            iam_owned=True,
            version=774,
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get_iam_policy(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Policy)
    assert response.etag == 'etag_value'
    assert response.iam_owned is True
    assert response.version == 774


def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyReservationRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get_iam_policy(request)


def test_get_iam_policy_rest_from_dict():
    """get_iam_policy also accepts a plain dict as the request object."""
    test_get_iam_policy_rest(request_type=dict)


def test_get_iam_policy_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the getIamPolicy URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            resource='resource_value',
        )
        mock_args.update(sample_request)
        client.get_iam_policy(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations/{resource}/getIamPolicy" % client.transport._host, args[1])


def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_iam_policy(
            compute.GetIamPolicyReservationRequest(),
            project='project_value',
            zone='zone_value',
            resource='resource_value',
        )


def test_insert_rest(transport: str = 'rest', request_type=compute.InsertReservationRequest):
    """insert returns a compute.Operation deserialized from the REST JSON body."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2"}
    request_init["reservation_resource"] = compute.Reservation(commitment='commitment_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    # (body of test_insert_rest) mock the HTTP call and fake a full Operation.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.insert(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertReservationRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2"}
    request_init["reservation_resource"] = compute.Reservation(commitment='commitment_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.insert(request)


def test_insert_rest_from_dict():
    """insert also accepts a plain dict as the request object."""
    test_insert_rest(request_type=dict)


def test_insert_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the expected insert URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            reservation_resource=compute.Reservation(commitment='commitment_value'),
        )
        mock_args.update(sample_request)
        client.insert(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations" % client.transport._host, args[1])


def test_insert_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.insert(
            compute.InsertReservationRequest(),
            project='project_value',
            zone='zone_value',
            reservation_resource=compute.Reservation(commitment='commitment_value'),
        )


def test_list_rest(transport: str = 'rest', request_type=compute.ListReservationsRequest):
    """list returns a pager whose attributes mirror the ReservationList response."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.ReservationList(
            id='id_value',
            kind='kind_value',
            next_page_token='next_page_token_value',
            self_link='self_link_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.ReservationList.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.list(request)

    # Establish that the response is the type that we expect.
    # (tail of test_list_rest) the pager proxies the raw response's attributes.
    assert isinstance(response, pagers.ListPager)
    assert response.id == 'id_value'
    assert response.kind == 'kind_value'
    assert response.next_page_token == 'next_page_token_value'
    assert response.self_link == 'self_link_value'


def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListReservationsRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.list(request)


def test_list_rest_from_dict():
    """list also accepts a plain dict as the request object."""
    test_list_rest(request_type=dict)


def test_list_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the expected list URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.ReservationList()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.ReservationList.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
        )
        mock_args.update(sample_request)
        client.list(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations" % client.transport._host, args[1])


def test_list_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list(
            compute.ListReservationsRequest(),
            project='project_value',
            zone='zone_value',
        )


def test_list_rest_pager():
    """The list pager walks next_page_token across pages and flattens items."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        #with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.ReservationList(
                items=[
                    compute.Reservation(),
                    compute.Reservation(),
                    compute.Reservation(),
                ],
                next_page_token='abc',
            ),
            compute.ReservationList(
                items=[],
                next_page_token='def',
            ),
            compute.ReservationList(
                items=[
                    compute.Reservation(),
                ],
                next_page_token='ghi',
            ),
            compute.ReservationList(
                items=[
                    compute.Reservation(),
                    compute.Reservation(),
                ],
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.ReservationList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1", "zone": "sample2"}

        pager = client.list(request=sample_request)

        # 3 + 0 + 1 + 2 items across the four pages.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.Reservation)
                   for i in results)

        # The second pass consumes the duplicated responses page by page.
        pages = list(client.list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


def test_resize_rest(transport: str = 'rest', request_type=compute.ResizeReservationRequest):
    """resize returns a compute.Operation deserialized from the REST JSON body."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}
    request_init["reservations_resize_request_resource"] = compute.ReservationsResizeRequest(specific_sku_count=1920)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    # (body of test_resize_rest) mock the HTTP call and fake a full Operation.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.resize(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


def test_resize_rest_bad_request(transport: str = 'rest', request_type=compute.ResizeReservationRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}
    request_init["reservations_resize_request_resource"] = compute.ReservationsResizeRequest(specific_sku_count=1920)
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.resize(request)


def test_resize_rest_from_dict():
    """resize also accepts a plain dict as the request object."""
    test_resize_rest(request_type=dict)


def test_resize_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the expected resize URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "reservation": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            reservation='reservation_value',
            reservations_resize_request_resource=compute.ReservationsResizeRequest(specific_sku_count=1920),
        )
        mock_args.update(sample_request)
        client.resize(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations/{reservation}/resize" % client.transport._host, args[1])


def test_resize_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.resize(
            compute.ResizeReservationRequest(),
            project='project_value',
            zone='zone_value',
            reservation='reservation_value',
            reservations_resize_request_resource=compute.ReservationsResizeRequest(specific_sku_count=1920),
        )


def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyReservationRequest):
    """set_iam_policy returns a compute.Policy deserialized from the JSON body."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy(
            etag='etag_value',
            iam_owned=True,
            version=774,
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.set_iam_policy(request)

    # Establish that the response is the type that we expect.
    # (tail of test_set_iam_policy_rest) Policy fields round-trip through JSON.
    assert isinstance(response, compute.Policy)
    assert response.etag == 'etag_value'
    assert response.iam_owned is True
    assert response.version == 774


def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyReservationRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request_init["zone_set_policy_request_resource"] = compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.set_iam_policy(request)


def test_set_iam_policy_rest_from_dict():
    """set_iam_policy also accepts a plain dict as the request object."""
    test_set_iam_policy_rest(request_type=dict)


def test_set_iam_policy_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the setIamPolicy URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Policy()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Policy.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            resource='resource_value',
            zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]),
        )
        mock_args.update(sample_request)
        client.set_iam_policy(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations/{resource}/setIamPolicy" % client.transport._host, args[1])


def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.set_iam_policy(
            compute.SetIamPolicyReservationRequest(),
            project='project_value',
            zone='zone_value',
            resource='resource_value',
            zone_set_policy_request_resource=compute.ZoneSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]),
        )


def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsReservationRequest):
    """test_iam_permissions returns a TestPermissionsResponse from the JSON body."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value'])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.TestPermissionsResponse(
            permissions=['permissions_value'],
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.TestPermissionsResponse.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.test_iam_permissions(request)

    # Establish that the response is the type that we expect.
    # (tail of test_test_iam_permissions_rest) permissions list round-trips.
    assert isinstance(response, compute.TestPermissionsResponse)
    assert response.permissions == ['permissions_value']


def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsReservationRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "zone": "sample2", "resource": "sample3"}
    request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value'])
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.test_iam_permissions(request)


def test_test_iam_permissions_rest_from_dict():
    """test_iam_permissions also accepts a plain dict as the request object."""
    test_test_iam_permissions_rest(request_type=dict)


def test_test_iam_permissions_rest_flattened(transport: str = 'rest'):
    """Flattened keyword arguments transcode onto the testIamPermissions URL."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.TestPermissionsResponse()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.TestPermissionsResponse.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "zone": "sample2", "resource": "sample3"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            zone='zone_value',
            resource='resource_value',
            test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']),
        )
        mock_args.update(sample_request)
        client.test_iam_permissions(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/reservations/{resource}/testIamPermissions" % client.transport._host, args[1])


def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'):
    """Passing both a request object and flattened fields raises ValueError."""
    client = ReservationsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.test_iam_permissions(
            compute.TestIamPermissionsReservationRequest(),
            project='project_value',
            zone='zone_value',
            resource='resource_value',
            test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']),
        )


def test_credentials_transport_error():
    """Supplying credentials/options together with a transport instance fails."""
    # It is an error to provide credentials and a transport instance.
+ transport = transports.ReservationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ReservationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ReservationsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ReservationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ReservationsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ReservationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ReservationsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ReservationsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_reservations_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ReservationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_reservations_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.reservations.transports.ReservationsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ReservationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'resize', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_reservations_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ReservationsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_reservations_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.reservations.transports.ReservationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ReservationsTransport() + adc.assert_called_once() + + +def test_reservations_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ReservationsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_reservations_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ReservationsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_reservations_host_no_port(): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_reservations_host_with_port(): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ReservationsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ReservationsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ReservationsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ReservationsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ReservationsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ReservationsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ReservationsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ReservationsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ReservationsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ReservationsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ReservationsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ReservationsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ReservationsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ReservationsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ReservationsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ReservationsTransport, '_prep_wrapped_messages') as prep: + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ReservationsTransport, '_prep_wrapped_messages') as prep: + transport_class = ReservationsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ReservationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_resource_policies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_resource_policies.py new file mode 100644 index 000000000..4cfe73910 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_resource_policies.py @@ -0,0 +1,1776 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.resource_policies import ResourcePoliciesClient
+from google.cloud.compute_v1.services.resource_policies import pagers
+from google.cloud.compute_v1.services.resource_policies import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"  # dummy (cert, key) pair standing in for an mTLS client_cert_source
+
+
+# If the default endpoint is localhost, then the default mTLS endpoint will be the same.
+# This function rewrites the default endpoint so that the client produces a distinct
+# mTLS endpoint, which the endpoint-selection tests below can observe.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ResourcePoliciesClient._get_default_mtls_endpoint(None) is None + assert ResourcePoliciesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ResourcePoliciesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ResourcePoliciesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ResourcePoliciesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ResourcePoliciesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ResourcePoliciesClient, +]) +def test_resource_policies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ResourcePoliciesRestTransport, "rest"), +]) +def test_resource_policies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ResourcePoliciesClient, +]) +def test_resource_policies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_resource_policies_client_get_transport_class(): + transport = ResourcePoliciesClient.get_transport_class() + available_transports = [ + transports.ResourcePoliciesRestTransport, + ] + assert transport in available_transports + + transport = ResourcePoliciesClient.get_transport_class("rest") + assert transport == transports.ResourcePoliciesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ResourcePoliciesClient, transports.ResourcePoliciesRestTransport, "rest"), +]) +@mock.patch.object(ResourcePoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ResourcePoliciesClient)) +def test_resource_policies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ResourcePoliciesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ResourcePoliciesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ResourcePoliciesClient, transports.ResourcePoliciesRestTransport, "rest", "true"), + (ResourcePoliciesClient, transports.ResourcePoliciesRestTransport, "rest", "false"), +]) +@mock.patch.object(ResourcePoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ResourcePoliciesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_resource_policies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ResourcePoliciesClient, transports.ResourcePoliciesRestTransport, "rest"), +]) +def test_resource_policies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ResourcePoliciesClient, transports.ResourcePoliciesRestTransport, "rest"), +]) +def test_resource_policies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListResourcePoliciesRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ResourcePolicyAggregatedList( + etag='etag_value', + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ResourcePolicyAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.etag == 'etag_value' + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListResourcePoliciesRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ResourcePolicyAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ResourcePolicyAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/resourcePolicies" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListResourcePoliciesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ResourcePolicyAggregatedList( + items={ + 'a':compute.ResourcePoliciesScopedList(), + 'b':compute.ResourcePoliciesScopedList(), + 'c':compute.ResourcePoliciesScopedList(), + }, + next_page_token='abc', + ), + compute.ResourcePolicyAggregatedList( + items={}, + next_page_token='def', + ), + compute.ResourcePolicyAggregatedList( + items={ + 'g':compute.ResourcePoliciesScopedList(), + }, + next_page_token='ghi', + ), + compute.ResourcePolicyAggregatedList( + items={ + 'h':compute.ResourcePoliciesScopedList(), + 'i':compute.ResourcePoliciesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ResourcePolicyAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.ResourcePoliciesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + 
assert tuple(type(t) for t in result) == (str, compute.ResourcePoliciesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.ResourcePoliciesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource_policy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = 
client.delete(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource_policy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource_policy": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource_policy='resource_policy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteResourcePolicyRequest(), + project='project_value', + region='region_value', + resource_policy='resource_policy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource_policy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ResourcePolicy( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + status='status_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ResourcePolicy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.ResourcePolicy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource_policy": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ResourcePolicy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ResourcePolicy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource_policy": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource_policy='resource_policy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource_policy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetResourcePolicyRequest(), + project='project_value', + region='region_value', + resource_policy='resource_policy_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyResourcePolicyRequest(), + project='project_value', + region='region_value', + resource='resource_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["resource_policy_resource"] = compute.ResourcePolicy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["resource_policy_resource"] = compute.ResourcePolicy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource_policy_resource=compute.ResourcePolicy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertResourcePolicyRequest(), + project='project_value', + region='region_value', + resource_policy_resource=compute.ResourcePolicy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListResourcePoliciesRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ResourcePolicyList( + etag='etag_value', + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ResourcePolicyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.etag == 'etag_value' + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListResourcePoliciesRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ResourcePolicyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ResourcePolicyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListResourcePoliciesRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ResourcePolicyList( + items=[ + compute.ResourcePolicy(), + compute.ResourcePolicy(), + compute.ResourcePolicy(), + ], + next_page_token='abc', + ), + compute.ResourcePolicyList( + items=[], + next_page_token='def', + ), + compute.ResourcePolicyList( + items=[ + compute.ResourcePolicy(), + ], + next_page_token='ghi', + ), + compute.ResourcePolicyList( + items=[ + compute.ResourcePolicy(), + compute.ResourcePolicy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ResourcePolicyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.ResourcePolicy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method 
and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyResourcePolicyRequest(), + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsResourcePolicyRequest): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/resourcePolicies/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsResourcePolicyRequest(), + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ResourcePoliciesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ResourcePoliciesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ResourcePoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ResourcePoliciesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ResourcePoliciesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_resource_policies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ResourcePoliciesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_resource_policies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.resource_policies.transports.ResourcePoliciesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ResourcePoliciesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_resource_policies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.resource_policies.transports.ResourcePoliciesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ResourcePoliciesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 
'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_resource_policies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.resource_policies.transports.ResourcePoliciesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ResourcePoliciesTransport() + adc.assert_called_once() + + +def test_resource_policies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ResourcePoliciesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_resource_policies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ResourcePoliciesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_resource_policies_host_no_port(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_resource_policies_host_with_port(): + client = ResourcePoliciesClient( + 
credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ResourcePoliciesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ResourcePoliciesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ResourcePoliciesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ResourcePoliciesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ResourcePoliciesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ResourcePoliciesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ResourcePoliciesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ResourcePoliciesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ResourcePoliciesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ResourcePoliciesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ResourcePoliciesClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ResourcePoliciesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ResourcePoliciesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ResourcePoliciesClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ResourcePoliciesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ResourcePoliciesTransport, '_prep_wrapped_messages') as prep: + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ResourcePoliciesTransport, '_prep_wrapped_messages') as prep: + transport_class = ResourcePoliciesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ResourcePoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_routers.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_routers.py new file mode 100644 index 000000000..316dbf7be --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_routers.py @@ -0,0 +1,2139 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.routers import RoutersClient +from google.cloud.compute_v1.services.routers import pagers +from google.cloud.compute_v1.services.routers import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RoutersClient._get_default_mtls_endpoint(None) is None + assert RoutersClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RoutersClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RoutersClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RoutersClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RoutersClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RoutersClient, +]) +def test_routers_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RoutersRestTransport, "rest"), +]) +def test_routers_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RoutersClient, +]) +def test_routers_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_routers_client_get_transport_class(): + transport = RoutersClient.get_transport_class() + available_transports = [ + transports.RoutersRestTransport, + ] + assert transport in available_transports + + transport = RoutersClient.get_transport_class("rest") + assert transport == transports.RoutersRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RoutersClient, transports.RoutersRestTransport, "rest"), +]) +@mock.patch.object(RoutersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RoutersClient)) +def test_routers_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RoutersClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(RoutersClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RoutersClient, transports.RoutersRestTransport, "rest", "true"), + (RoutersClient, transports.RoutersRestTransport, "rest", "false"), +]) +@mock.patch.object(RoutersClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RoutersClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_routers_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RoutersClient, transports.RoutersRestTransport, "rest"), +]) +def test_routers_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RoutersClient, transports.RoutersRestTransport, "rest"), +]) +def test_routers_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListRoutersRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.RouterAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouterAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListRoutersRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RouterAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouterAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/routers" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListRoutersRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RouterAggregatedList( + items={ + 'a':compute.RoutersScopedList(), + 'b':compute.RoutersScopedList(), + 'c':compute.RoutersScopedList(), + }, + next_page_token='abc', + ), + compute.RouterAggregatedList( + items={}, + next_page_token='def', + ), + compute.RouterAggregatedList( + items={ + 'g':compute.RoutersScopedList(), + }, + next_page_token='ghi', + ), + compute.RouterAggregatedList( + items={ + 'h':compute.RoutersScopedList(), + 'i':compute.RoutersScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RouterAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.RoutersScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.RoutersScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.RoutersScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will 
satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router='router_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers/{router}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteRouterRequest(), + project='project_value', + region='region_value', + router='router_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Router( + creation_timestamp='creation_timestamp_value', + description='description_value', + encrypted_interconnect_router=True, + id=205, + kind='kind_value', + name='name_value', + network='network_value', + region='region_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Router.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Router) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.encrypted_interconnect_router is True + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Router() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Router.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router='router_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers/{router}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetRouterRequest(), + project='project_value', + region='region_value', + router='router_value', + ) + + +def test_get_nat_mapping_info_rest(transport: str = 'rest', request_type=compute.GetNatMappingInfoRoutersRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VmEndpointNatMappingsList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VmEndpointNatMappingsList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_nat_mapping_info(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.GetNatMappingInfoPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_get_nat_mapping_info_rest_bad_request(transport: str = 'rest', request_type=compute.GetNatMappingInfoRoutersRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_nat_mapping_info(request) + + +def test_get_nat_mapping_info_rest_from_dict(): + test_get_nat_mapping_info_rest(request_type=dict) + + +def test_get_nat_mapping_info_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.VmEndpointNatMappingsList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VmEndpointNatMappingsList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router='router_value', + ) + mock_args.update(sample_request) + client.get_nat_mapping_info(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers/{router}/getNatMappingInfo" % client.transport._host, args[1]) + + +def test_get_nat_mapping_info_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_nat_mapping_info( + compute.GetNatMappingInfoRoutersRequest(), + project='project_value', + region='region_value', + router='router_value', + ) + + +def test_get_nat_mapping_info_rest_pager(): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.VmEndpointNatMappingsList( + result=[ + compute.VmEndpointNatMappings(), + compute.VmEndpointNatMappings(), + compute.VmEndpointNatMappings(), + ], + next_page_token='abc', + ), + compute.VmEndpointNatMappingsList( + result=[], + next_page_token='def', + ), + compute.VmEndpointNatMappingsList( + result=[ + compute.VmEndpointNatMappings(), + ], + next_page_token='ghi', + ), + compute.VmEndpointNatMappingsList( + result=[ + compute.VmEndpointNatMappings(), + compute.VmEndpointNatMappings(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.VmEndpointNatMappingsList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + pager = client.get_nat_mapping_info(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.VmEndpointNatMappings) + for i in results) + + pages = list(client.get_nat_mapping_info(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_get_router_status_rest(transport: str = 'rest', request_type=compute.GetRouterStatusRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a 
response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RouterStatusResponse( + kind='kind_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouterStatusResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_router_status(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.RouterStatusResponse) + assert response.kind == 'kind_value' + + +def test_get_router_status_rest_bad_request(transport: str = 'rest', request_type=compute.GetRouterStatusRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_router_status(request) + + +def test_get_router_status_rest_from_dict(): + test_get_router_status_rest(request_type=dict) + + +def test_get_router_status_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.RouterStatusResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouterStatusResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router='router_value', + ) + mock_args.update(sample_request) + client.get_router_status(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers/{router}/getRouterStatus" % client.transport._host, args[1]) + + +def test_get_router_status_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_router_status( + compute.GetRouterStatusRouterRequest(), + project='project_value', + region='region_value', + router='router_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertRouterRequest(), + project='project_value', + region='region_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRoutersRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RouterList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouterList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRoutersRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.RouterList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouterList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListRoutersRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RouterList( + items=[ + compute.Router(), + compute.Router(), + compute.Router(), + ], + next_page_token='abc', + ), + compute.RouterList( + items=[], + next_page_token='def', + ), + compute.RouterList( + items=[ + compute.Router(), + ], + next_page_token='ghi', + ), + compute.RouterList( + items=[ + compute.Router(), + compute.Router(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RouterList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Router) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router='router_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers/{router}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchRouterRequest(), + project='project_value', + region='region_value', + router='router_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + + +def test_preview_rest(transport: str = 'rest', request_type=compute.PreviewRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RoutersPreviewResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RoutersPreviewResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.preview(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.RoutersPreviewResponse) + + +def test_preview_rest_bad_request(transport: str = 'rest', request_type=compute.PreviewRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.preview(request) + + +def test_preview_rest_from_dict(): + test_preview_rest(request_type=dict) + + +def test_preview_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.RoutersPreviewResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RoutersPreviewResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router='router_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + mock_args.update(sample_request) + client.preview(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers/{router}/preview" % client.transport._host, args[1]) + + +def test_preview_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.preview( + compute.PreviewRouterRequest(), + project='project_value', + region='region_value', + router='router_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + + +def test_update_rest(transport: str = 'rest', request_type=compute.UpdateRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update(request) + + # 
Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateRouterRequest): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "router": "sample3"} + request_init["router_resource"] = compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update(request) + + +def test_update_rest_from_dict(): + test_update_rest(request_type=dict) + + +def test_update_rest_flattened(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "router": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + router='router_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + mock_args.update(sample_request) + client.update(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/routers/{router}" % client.transport._host, args[1]) + + +def test_update_rest_flattened_error(transport: str = 'rest'): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update( + compute.UpdateRouterRequest(), + project='project_value', + region='region_value', + router='router_value', + router_resource=compute.Router(bgp=compute.RouterBgp(advertise_mode='advertise_mode_value')), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.RoutersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RoutersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RoutersClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RoutersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RoutersClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.RoutersRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RoutersClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RoutersRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_routers_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RoutersTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_routers_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.routers.transports.RoutersTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RoutersTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'get_nat_mapping_info', + 'get_router_status', + 'insert', + 'list', + 'patch', + 'preview', + 'update', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_routers_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.routers.transports.RoutersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RoutersTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_routers_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.routers.transports.RoutersTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RoutersTransport() + adc.assert_called_once() + + +def test_routers_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RoutersClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_routers_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RoutersRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_routers_host_no_port(): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_routers_host_with_port(): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RoutersClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RoutersClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RoutersClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RoutersClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RoutersClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RoutersClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RoutersClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RoutersClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RoutersClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RoutersClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RoutersClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RoutersClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RoutersClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RoutersClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RoutersClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RoutersTransport, '_prep_wrapped_messages') as prep: + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RoutersTransport, '_prep_wrapped_messages') as prep: + transport_class = RoutersClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RoutersClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_routes.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_routes.py new file mode 100644 index 000000000..0754fc22b --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_routes.py @@ -0,0 +1,1253 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.routes import RoutesClient +from google.cloud.compute_v1.services.routes import pagers +from google.cloud.compute_v1.services.routes import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert RoutesClient._get_default_mtls_endpoint(None) is None + assert RoutesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert RoutesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert RoutesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert RoutesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert RoutesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + RoutesClient, +]) +def test_routes_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.RoutesRestTransport, "rest"), +]) +def test_routes_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + RoutesClient, +]) +def test_routes_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_routes_client_get_transport_class(): + transport = RoutesClient.get_transport_class() + available_transports = [ + transports.RoutesRestTransport, + ] + assert transport in available_transports + + transport = RoutesClient.get_transport_class("rest") + assert transport == transports.RoutesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RoutesClient, transports.RoutesRestTransport, "rest"), +]) +@mock.patch.object(RoutesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RoutesClient)) +def test_routes_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(RoutesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(RoutesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (RoutesClient, transports.RoutesRestTransport, "rest", "true"), + (RoutesClient, transports.RoutesRestTransport, "rest", "false"), +]) +@mock.patch.object(RoutesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(RoutesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_routes_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RoutesClient, transports.RoutesRestTransport, "rest"), +]) +def test_routes_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (RoutesClient, transports.RoutesRestTransport, "rest"), +]) +def test_routes_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteRouteRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "route": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteRouteRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "route": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "route": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + route='route_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/routes/{route}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteRouteRequest(), + project='project_value', + route='route_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetRouteRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "route": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Route( + creation_timestamp='creation_timestamp_value', + description='description_value', + dest_range='dest_range_value', + id=205, + kind='kind_value', + name='name_value', + network='network_value', + next_hop_gateway='next_hop_gateway_value', + next_hop_ilb='next_hop_ilb_value', + next_hop_instance='next_hop_instance_value', + next_hop_ip='next_hop_ip_value', + next_hop_network='next_hop_network_value', + next_hop_peering='next_hop_peering_value', + next_hop_vpn_tunnel='next_hop_vpn_tunnel_value', + priority=898, + route_type='route_type_value', + self_link='self_link_value', + tags=['tags_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Route.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Route) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.dest_range == 'dest_range_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.next_hop_gateway == 'next_hop_gateway_value' + assert response.next_hop_ilb == 'next_hop_ilb_value' + assert response.next_hop_instance == 'next_hop_instance_value' + assert response.next_hop_ip == 'next_hop_ip_value' + assert response.next_hop_network == 'next_hop_network_value' + assert response.next_hop_peering == 'next_hop_peering_value' + assert response.next_hop_vpn_tunnel == 'next_hop_vpn_tunnel_value' + assert response.priority == 898 + assert response.route_type == 'route_type_value' + assert response.self_link == 'self_link_value' + assert response.tags == ['tags_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetRouteRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "route": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Route() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Route.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "route": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + route='route_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/routes/{route}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetRouteRequest(), + project='project_value', + route='route_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertRouteRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["route_resource"] = compute.Route(as_paths=[compute.RouteAsPath(as_lists=[866])]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertRouteRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["route_resource"] = compute.Route(as_paths=[compute.RouteAsPath(as_lists=[866])]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + route_resource=compute.Route(as_paths=[compute.RouteAsPath(as_lists=[866])]), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/routes" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertRouteRequest(), + project='project_value', + route_resource=compute.Route(as_paths=[compute.RouteAsPath(as_lists=[866])]), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListRoutesRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RouteList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouteList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListRoutesRequest): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.RouteList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.RouteList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/routes" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListRoutesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.RouteList( + items=[ + compute.Route(), + compute.Route(), + compute.Route(), + ], + next_page_token='abc', + ), + compute.RouteList( + items=[], + next_page_token='def', + ), + compute.RouteList( + items=[ + compute.Route(), + ], + next_page_token='ghi', + ), + compute.RouteList( + items=[ + compute.Route(), + compute.Route(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.RouteList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Route) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.RoutesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.RoutesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RoutesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.RoutesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = RoutesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.RoutesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = RoutesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.RoutesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_routes_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.RoutesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_routes_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.routes.transports.RoutesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.RoutesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_routes_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.routes.transports.RoutesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RoutesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_routes_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.routes.transports.RoutesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.RoutesTransport() + adc.assert_called_once() + + +def test_routes_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + RoutesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_routes_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.RoutesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_routes_host_no_port(): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_routes_host_with_port(): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = RoutesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = RoutesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RoutesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = RoutesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = RoutesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = RoutesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = RoutesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = RoutesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = RoutesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = RoutesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = RoutesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = RoutesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = RoutesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = RoutesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = RoutesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.RoutesTransport, '_prep_wrapped_messages') as prep: + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.RoutesTransport, '_prep_wrapped_messages') as prep: + transport_class = RoutesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = RoutesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_security_policies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_security_policies.py new file mode 100644 index 000000000..352c8951a --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_security_policies.py @@ -0,0 +1,2061 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.security_policies import SecurityPoliciesClient +from google.cloud.compute_v1.services.security_policies import pagers +from google.cloud.compute_v1.services.security_policies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SecurityPoliciesClient._get_default_mtls_endpoint(None) is None + assert SecurityPoliciesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SecurityPoliciesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SecurityPoliciesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SecurityPoliciesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SecurityPoliciesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + SecurityPoliciesClient, +]) +def test_security_policies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SecurityPoliciesRestTransport, "rest"), +]) +def test_security_policies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + SecurityPoliciesClient, +]) +def test_security_policies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_security_policies_client_get_transport_class(): + transport = SecurityPoliciesClient.get_transport_class() + available_transports = [ + transports.SecurityPoliciesRestTransport, + ] + assert transport in available_transports + + transport = SecurityPoliciesClient.get_transport_class("rest") + assert transport == transports.SecurityPoliciesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SecurityPoliciesClient, transports.SecurityPoliciesRestTransport, "rest"), +]) +@mock.patch.object(SecurityPoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecurityPoliciesClient)) +def test_security_policies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(SecurityPoliciesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SecurityPoliciesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SecurityPoliciesClient, transports.SecurityPoliciesRestTransport, "rest", "true"), + (SecurityPoliciesClient, transports.SecurityPoliciesRestTransport, "rest", "false"), +]) +@mock.patch.object(SecurityPoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SecurityPoliciesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_security_policies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SecurityPoliciesClient, transports.SecurityPoliciesRestTransport, "rest"), +]) +def test_security_policies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SecurityPoliciesClient, transports.SecurityPoliciesRestTransport, "rest"), +]) +def test_security_policies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_rule_rest(transport: str = 'rest', request_type=compute.AddRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request_init["security_policy_rule_resource"] = compute.SecurityPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_rule(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_rule_rest_bad_request(transport: str = 'rest', request_type=compute.AddRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request_init["security_policy_rule_resource"] = compute.SecurityPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_rule(request) + + +def test_add_rule_rest_from_dict(): + test_add_rule_rest(request_type=dict) + + +def test_add_rule_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "security_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy='security_policy_value', + security_policy_rule_resource=compute.SecurityPolicyRule(action='action_value'), + ) + mock_args.update(sample_request) + client.add_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/addRule" % client.transport._host, args[1]) + + +def test_add_rule_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_rule( + compute.AddRuleSecurityPolicyRequest(), + project='project_value', + security_policy='security_policy_value', + security_policy_rule_resource=compute.SecurityPolicyRule(action='action_value'), + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "security_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy='security_policy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/{security_policy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteSecurityPolicyRequest(), + project='project_value', + security_policy='security_policy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SecurityPolicy( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPolicy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.SecurityPolicy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SecurityPolicy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPolicy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "security_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy='security_policy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/{security_policy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetSecurityPolicyRequest(), + project='project_value', + security_policy='security_policy_value', + ) + + +def test_get_rule_rest(transport: str = 'rest', request_type=compute.GetRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SecurityPolicyRule( + action='action_value', + description='description_value', + kind='kind_value', + preview=True, + priority=898, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPolicyRule.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_rule(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.SecurityPolicyRule) + assert response.action == 'action_value' + assert response.description == 'description_value' + assert response.kind == 'kind_value' + assert response.preview is True + assert response.priority == 898 + + +def test_get_rule_rest_bad_request(transport: str = 'rest', request_type=compute.GetRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_rule(request) + + +def test_get_rule_rest_from_dict(): + test_get_rule_rest(request_type=dict) + + +def test_get_rule_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SecurityPolicyRule() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPolicyRule.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "security_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy='security_policy_value', + ) + mock_args.update(sample_request) + client.get_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/getRule" % client.transport._host, args[1]) + + +def test_get_rule_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_rule( + compute.GetRuleSecurityPolicyRequest(), + project='project_value', + security_policy='security_policy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["security_policy_resource"] = compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = 
client.insert(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["security_policy_resource"] = compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy_resource=compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertSecurityPolicyRequest(), + project='project_value', + security_policy_resource=compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListSecurityPoliciesRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SecurityPolicyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPolicyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListSecurityPoliciesRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SecurityPolicyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPolicyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListSecurityPoliciesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SecurityPolicyList( + items=[ + compute.SecurityPolicy(), + compute.SecurityPolicy(), + compute.SecurityPolicy(), + ], + next_page_token='abc', + ), + compute.SecurityPolicyList( + items=[], + next_page_token='def', + ), + compute.SecurityPolicyList( + items=[ + compute.SecurityPolicy(), + ], + next_page_token='ghi', + ), + compute.SecurityPolicyList( + items=[ + compute.SecurityPolicy(), + compute.SecurityPolicy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SecurityPolicyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.SecurityPolicy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_preconfigured_expression_sets_rest(transport: str = 'rest', request_type=compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_preconfigured_expression_sets(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse) + + +def test_list_preconfigured_expression_sets_rest_bad_request(transport: str = 'rest', request_type=compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_preconfigured_expression_sets(request) + + +def test_list_preconfigured_expression_sets_rest_from_dict(): + test_list_preconfigured_expression_sets_rest(request_type=dict) + + +def test_list_preconfigured_expression_sets_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SecurityPoliciesListPreconfiguredExpressionSetsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list_preconfigured_expression_sets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/listPreconfiguredExpressionSets" % client.transport._host, args[1]) + + +def test_list_preconfigured_expression_sets_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list_preconfigured_expression_sets( + compute.ListPreconfiguredExpressionSetsSecurityPoliciesRequest(), + project='project_value', + ) + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request_init["security_policy_resource"] = compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = 
response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request_init["security_policy_resource"] = compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "security_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy='security_policy_value', + security_policy_resource=compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/{security_policy}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchSecurityPolicyRequest(), + project='project_value', + security_policy='security_policy_value', + security_policy_resource=compute.SecurityPolicy(adaptive_protection_config=compute.SecurityPolicyAdaptiveProtectionConfig(layer7_ddos_defense_config=compute.SecurityPolicyAdaptiveProtectionConfigLayer7DdosDefenseConfig(enable=True))), + ) + + +def test_patch_rule_rest(transport: str = 'rest', request_type=compute.PatchRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request_init["security_policy_rule_resource"] = compute.SecurityPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch_rule(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rule_rest_bad_request(transport: str = 'rest', request_type=compute.PatchRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request_init["security_policy_rule_resource"] = compute.SecurityPolicyRule(action='action_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch_rule(request) + + +def test_patch_rule_rest_from_dict(): + test_patch_rule_rest(request_type=dict) + + +def test_patch_rule_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "security_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy='security_policy_value', + security_policy_rule_resource=compute.SecurityPolicyRule(action='action_value'), + ) + mock_args.update(sample_request) + client.patch_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/patchRule" % client.transport._host, args[1]) + + +def test_patch_rule_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch_rule( + compute.PatchRuleSecurityPolicyRequest(), + project='project_value', + security_policy='security_policy_value', + security_policy_rule_resource=compute.SecurityPolicyRule(action='action_value'), + ) + + +def test_remove_rule_rest(transport: str = 'rest', request_type=compute.RemoveRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_rule(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_rule_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveRuleSecurityPolicyRequest): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "security_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_rule(request) + + +def test_remove_rule_rest_from_dict(): + test_remove_rule_rest(request_type=dict) + + +def test_remove_rule_rest_flattened(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "security_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + security_policy='security_policy_value', + ) + mock_args.update(sample_request) + client.remove_rule(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/securityPolicies/{security_policy}/removeRule" % client.transport._host, args[1]) + + +def test_remove_rule_rest_flattened_error(transport: str = 'rest'): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_rule( + compute.RemoveRuleSecurityPolicyRequest(), + project='project_value', + security_policy='security_policy_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.SecurityPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SecurityPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SecurityPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SecurityPoliciesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SecurityPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SecurityPoliciesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+    transport = transports.SecurityPoliciesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = SecurityPoliciesClient(transport=transport)
+    assert client.transport is transport
+
+
+@pytest.mark.parametrize("transport_class", [
+    transports.SecurityPoliciesRestTransport,
+])
+def test_transport_adc(transport_class):
+    # Default (ADC) credentials should be looked up when none are supplied.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()
+
+
+def test_security_policies_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error.
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.SecurityPoliciesTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_security_policies_base_transport():
+    # Instantiate the base transport with __init__ stubbed out, so only the
+    # abstract method surface is exercised below.
+    with mock.patch('google.cloud.compute_v1.services.security_policies.transports.SecurityPoliciesTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.SecurityPoliciesTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    # One entry per RPC defined on the SecurityPolicies service.
+    methods = (
+        'add_rule',
+        'delete',
+        'get',
+        'get_rule',
+        'insert',
+        'list',
+        'list_preconfigured_expression_sets',
+        'patch',
+        'patch_rule',
+        'remove_rule',
+    )
+    for method in methods:
+        with pytest.raises(NotImplementedError):
+            getattr(transport, method)(request=object())
+
+    with pytest.raises(NotImplementedError):
+        transport.close()
+
+
+def test_security_policies_base_transport_with_credentials_file():
+    # Instantiate the base transport with a credentials file; the file should
+    # be loaded with the service's default scopes.
+    with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.security_policies.transports.SecurityPoliciesTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.SecurityPoliciesTransport(
+            credentials_file="credentials.json",
+            quota_project_id="octopus",
+        )
+        load_creds.assert_called_once_with("credentials.json",
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/compute',
+                'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id="octopus",
+        )
+
+
+def test_security_policies_base_transport_with_adc():
+    # Test the default credentials are used if credentials and credentials_file are None.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.security_policies.transports.SecurityPoliciesTransport._prep_wrapped_messages') as Transport:
+        Transport.return_value = None
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport = transports.SecurityPoliciesTransport()
+        adc.assert_called_once()
+
+
+def test_security_policies_auth_adc():
+    # If no credentials are provided, we should use ADC credentials.
+    with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        SecurityPoliciesClient()
+        # ADC must be requested with the service's default scopes.
+        adc.assert_called_once_with(
+            scopes=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/compute',
+                'https://www.googleapis.com/auth/cloud-platform',
+),
+            quota_project_id=None,
+        )
+
+
+def test_security_policies_http_transport_client_cert_source_for_mtls():
+    # A provided client cert source should be forwarded to the session's
+    # mTLS channel configuration.
+    cred = ga_credentials.AnonymousCredentials()
+    with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
+        transports.SecurityPoliciesRestTransport (
+            credentials=cred,
+            client_cert_source_for_mtls=client_cert_source_callback
+        )
+        mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+def test_security_policies_host_no_port():
+    # An endpoint with no port gets the default :443 appended.
+    client = SecurityPoliciesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:443'
+
+
+def test_security_policies_host_with_port():
+    # An explicit port is preserved verbatim.
+    client = SecurityPoliciesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'),
+    )
+    assert client.transport._host == 'compute.googleapis.com:8000'
+
+
+def test_common_billing_account_path():
+    billing_account = "squid"
+    expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+    actual = SecurityPoliciesClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "clam",
+    }
+    path = SecurityPoliciesClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SecurityPoliciesClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+def test_common_folder_path():
+    folder = "whelk"
+    expected = "folders/{folder}".format(folder=folder, )
+    actual = SecurityPoliciesClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "octopus",
+    }
+    path = SecurityPoliciesClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SecurityPoliciesClient.parse_common_folder_path(path)
+    assert expected == actual
+
+def test_common_organization_path():
+    organization = "oyster"
+    expected = "organizations/{organization}".format(organization=organization, )
+    actual = SecurityPoliciesClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "nudibranch",
+    }
+    path = SecurityPoliciesClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SecurityPoliciesClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = SecurityPoliciesClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = SecurityPoliciesClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SecurityPoliciesClient.parse_common_project_path(path)
+    assert expected == actual
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = SecurityPoliciesClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = SecurityPoliciesClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SecurityPoliciesClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+    # A custom client_info must reach _prep_wrapped_messages on both the
+    # client path and the direct-transport path.
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.SecurityPoliciesTransport, '_prep_wrapped_messages') as prep:
+        client = SecurityPoliciesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.SecurityPoliciesTransport, '_prep_wrapped_messages') as prep:
+        transport_class = SecurityPoliciesClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+
+def test_transport_close():
+    # NOTE(review): the local name 'transports' shadows the module-level
+    # 'transports' import inside this function; it maps transport name to the
+    # session attribute whose close() is expected.
+    transports = {
+        "rest": "_session",
+    }
+
+    for transport, close_name in transports.items():
+        client = SecurityPoliciesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+def test_client_ctx():
+    transports = [
+        'rest',
+    ]
+    for transport in transports:
+        client = SecurityPoliciesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        #
Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_service_attachments.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_service_attachments.py new file mode 100644 index 000000000..79f6aa646 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_service_attachments.py @@ -0,0 +1,1940 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.service_attachments import ServiceAttachmentsClient
+from google.cloud.compute_v1.services.service_attachments import pagers
+from google.cloud.compute_v1.services.service_attachments import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+def client_cert_source_callback():
+    # Stand-in mTLS client certificate source: returns static (cert, key) bytes.
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client):
+    # Substitute a non-localhost endpoint so mTLS endpoint derivation is testable.
+    return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
+
+
+def test__get_default_mtls_endpoint():
+    api_endpoint = "example.googleapis.com"
+    api_mtls_endpoint = "example.mtls.googleapis.com"
+    sandbox_endpoint = "example.sandbox.googleapis.com"
+    sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+    non_googleapi = "api.example.com"
+
+    assert ServiceAttachmentsClient._get_default_mtls_endpoint(None) is None
+    assert ServiceAttachmentsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
+    assert ServiceAttachmentsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
+    assert ServiceAttachmentsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
+    assert ServiceAttachmentsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
+    assert ServiceAttachmentsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+
+
+@pytest.mark.parametrize("client_class", [
+    ServiceAttachmentsClient,
+])
+def test_service_attachments_client_from_service_account_info(client_class):
+    # from_service_account_info should build a client holding the factory's creds.
+    creds = ga_credentials.AnonymousCredentials()
+    with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
+        factory.return_value = creds
+        info = {"valid": True}
+        client = client_class.from_service_account_info(info)
+        assert client.transport._credentials == creds
+        assert isinstance(client, client_class)
+
+        assert client.transport._host == 'compute.googleapis.com:443'
+
+
+@pytest.mark.parametrize("transport_class,transport_name", [
+    (transports.ServiceAttachmentsRestTransport, "rest"),
+])
+def test_service_attachments_client_service_account_always_use_jwt(transport_class, transport_name):
+    with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt:
+        creds = service_account.Credentials(None, None, None)
+        transport =
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ServiceAttachmentsClient, +]) +def test_service_attachments_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_service_attachments_client_get_transport_class(): + transport = ServiceAttachmentsClient.get_transport_class() + available_transports = [ + transports.ServiceAttachmentsRestTransport, + ] + assert transport in available_transports + + transport = ServiceAttachmentsClient.get_transport_class("rest") + assert transport == transports.ServiceAttachmentsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ServiceAttachmentsClient, transports.ServiceAttachmentsRestTransport, "rest"), +]) +@mock.patch.object(ServiceAttachmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ServiceAttachmentsClient)) +def test_service_attachments_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ServiceAttachmentsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ServiceAttachmentsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ServiceAttachmentsClient, transports.ServiceAttachmentsRestTransport, "rest", "true"), + (ServiceAttachmentsClient, transports.ServiceAttachmentsRestTransport, "rest", "false"), +]) +@mock.patch.object(ServiceAttachmentsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ServiceAttachmentsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_service_attachments_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ServiceAttachmentsClient, transports.ServiceAttachmentsRestTransport, "rest"), +]) +def test_service_attachments_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ServiceAttachmentsClient, transports.ServiceAttachmentsRestTransport, "rest"), +]) +def test_service_attachments_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListServiceAttachmentsRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ServiceAttachmentAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ServiceAttachmentAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListServiceAttachmentsRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ServiceAttachmentAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ServiceAttachmentAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/serviceAttachments" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListServiceAttachmentsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ServiceAttachmentAggregatedList( + items={ + 'a':compute.ServiceAttachmentsScopedList(), + 'b':compute.ServiceAttachmentsScopedList(), + 'c':compute.ServiceAttachmentsScopedList(), + }, + next_page_token='abc', + ), + compute.ServiceAttachmentAggregatedList( + items={}, + next_page_token='def', + ), + compute.ServiceAttachmentAggregatedList( + items={ + 'g':compute.ServiceAttachmentsScopedList(), + }, + next_page_token='ghi', + ), + compute.ServiceAttachmentAggregatedList( + items={ + 'h':compute.ServiceAttachmentsScopedList(), + 'i':compute.ServiceAttachmentsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ServiceAttachmentAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in 
zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.ServiceAttachmentsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.ServiceAttachmentsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.ServiceAttachmentsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+        return_value = compute.Operation(
+            client_operation_id='client_operation_id_value',
+            creation_timestamp='creation_timestamp_value',
+            description='description_value',
+            end_time='end_time_value',
+            http_error_message='http_error_message_value',
+            http_error_status_code=2374,
+            id=205,
+            insert_time='insert_time_value',
+            kind='kind_value',
+            name='name_value',
+            operation_group_id='operation_group_id_value',
+            operation_type='operation_type_value',
+            progress=885,
+            region='region_value',
+            self_link='self_link_value',
+            start_time='start_time_value',
+            status=compute.Operation.Status.DONE,  # enum field; asserted back below by identity with the enum member
+            status_message='status_message_value',
+            target_id=947,
+            target_link='target_link_value',
+            user='user_value',
+            zone='zone_value',
+        )
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+        response_value._content = json_return_value.encode('UTF-8')  # NOTE(review): sets the private body attr Response exposes via .content
+        req.return_value = response_value
+        response = client.delete(request)  # round-trips the Operation through the mocked REST transport
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400  # 4xx status is what makes the client raise the BadRequest asserted above
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete(request)
+
+
+def test_delete_rest_from_dict():
+    test_delete_rest(request_type=dict)  # re-runs the positive test with a plain dict request
+
+
+def test_delete_rest_flattened(transport: str = 'rest'):
+    client = ServiceAttachmentsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            service_attachment='service_attachment_value',
+        )
+        mock_args.update(sample_request)  # path params from sample_request so URL transcoding succeeds
+        client.delete(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{service_attachment}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteServiceAttachmentRequest(), + project='project_value', + region='region_value', + service_attachment='service_attachment_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ServiceAttachment( + connection_preference='connection_preference_value', + consumer_reject_lists=['consumer_reject_lists_value'], + creation_timestamp='creation_timestamp_value', + description='description_value', + enable_proxy_protocol=True, + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + nat_subnets=['nat_subnets_value'], + producer_forwarding_rule='producer_forwarding_rule_value', + region='region_value', + self_link='self_link_value', + target_service='target_service_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ServiceAttachment.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.ServiceAttachment) + assert response.connection_preference == 'connection_preference_value' + assert response.consumer_reject_lists == ['consumer_reject_lists_value'] + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.enable_proxy_protocol is True + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.nat_subnets == ['nat_subnets_value'] + assert response.producer_forwarding_rule == 'producer_forwarding_rule_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.target_service == 'target_service_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request 
that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ServiceAttachment() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ServiceAttachment.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + service_attachment='service_attachment_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{service_attachment}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetServiceAttachmentRequest(), + project='project_value', + region='region_value', + service_attachment='service_attachment_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicyServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, compute.Policy)
+    assert response.etag == 'etag_value'
+    assert response.iam_owned is True
+    assert response.version == 774
+
+
+def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicyServiceAttachmentRequest):
+    client = ServiceAttachmentsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"}
+    request = request_type(request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400  # 4xx status is what makes the client raise the BadRequest asserted above
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get_iam_policy(request)
+
+
+def test_get_iam_policy_rest_from_dict():
+    test_get_iam_policy_rest(request_type=dict)  # re-runs the positive test with a plain dict request
+
+
+def test_get_iam_policy_rest_flattened(transport: str = 'rest'):
+    client = ServiceAttachmentsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicyServiceAttachmentRequest(), + project='project_value', + region='region_value', + resource='resource_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["service_attachment_resource"] = compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the 
response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["service_attachment_resource"] = compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400  # 4xx status is what makes the client raise the BadRequest asserted above
+        response_value.request = Request()
+        req.return_value = response_value
+        client.insert(request)
+
+
+def test_insert_rest_from_dict():
+    test_insert_rest(request_type=dict)  # re-runs the positive test with a plain dict request
+
+
+def test_insert_rest_flattened(transport: str = 'rest'):
+    client = ServiceAttachmentsClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "region": "sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            region='region_value',
+            service_attachment_resource=compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]),
+        )
+        mock_args.update(sample_request)  # path params from sample_request so URL transcoding succeeds
+        client.insert(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertServiceAttachmentRequest(), + project='project_value', + region='region_value', + service_attachment_resource=compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListServiceAttachmentsRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ServiceAttachmentList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ServiceAttachmentList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListServiceAttachmentsRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.ServiceAttachmentList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ServiceAttachmentList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListServiceAttachmentsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ServiceAttachmentList( + items=[ + compute.ServiceAttachment(), + compute.ServiceAttachment(), + compute.ServiceAttachment(), + ], + next_page_token='abc', + ), + compute.ServiceAttachmentList( + items=[], + next_page_token='def', + ), + compute.ServiceAttachmentList( + items=[ + compute.ServiceAttachment(), + ], + next_page_token='ghi', + ), + compute.ServiceAttachmentList( + items=[ + compute.ServiceAttachment(), + compute.ServiceAttachment(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ServiceAttachmentList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.ServiceAttachment) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + request_init["service_attachment_resource"] = compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]) + request = 
request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + request_init["service_attachment_resource"] = compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "service_attachment": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + service_attachment='service_attachment_value', + service_attachment_resource=compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{service_attachment}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchServiceAttachmentRequest(), + project='project_value', + region='region_value', + service_attachment='service_attachment_value', + service_attachment_resource=compute.ServiceAttachment(connected_endpoints=[compute.ServiceAttachmentConnectedEndpoint(endpoint='endpoint_value')]), + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicyServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicyServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicyServiceAttachmentRequest(), + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsServiceAttachmentRequest): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/serviceAttachments/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsServiceAttachmentRequest(), + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.ServiceAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ServiceAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ServiceAttachmentsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ServiceAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ServiceAttachmentsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ServiceAttachmentsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ServiceAttachmentsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ServiceAttachmentsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_service_attachments_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ServiceAttachmentsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_service_attachments_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.service_attachments.transports.ServiceAttachmentsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ServiceAttachmentsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'patch', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_service_attachments_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.service_attachments.transports.ServiceAttachmentsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ServiceAttachmentsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + 
default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_service_attachments_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.service_attachments.transports.ServiceAttachmentsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ServiceAttachmentsTransport() + adc.assert_called_once() + + +def test_service_attachments_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ServiceAttachmentsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_service_attachments_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ServiceAttachmentsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_service_attachments_host_no_port(): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_service_attachments_host_with_port(): + client = 
ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ServiceAttachmentsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ServiceAttachmentsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = ServiceAttachmentsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ServiceAttachmentsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ServiceAttachmentsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ServiceAttachmentsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ServiceAttachmentsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ServiceAttachmentsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ServiceAttachmentsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ServiceAttachmentsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ServiceAttachmentsClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = ServiceAttachmentsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ServiceAttachmentsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ServiceAttachmentsClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ServiceAttachmentsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ServiceAttachmentsTransport, '_prep_wrapped_messages') as prep: + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ServiceAttachmentsTransport, '_prep_wrapped_messages') as prep: + transport_class = ServiceAttachmentsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ServiceAttachmentsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_snapshots.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_snapshots.py new file mode 100644 index 000000000..9832facb2 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_snapshots.py @@ -0,0 +1,1601 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.snapshots import SnapshotsClient
+from google.cloud.compute_v1.services.snapshots import pagers
+from google.cloud.compute_v1.services.snapshots import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+# Stand-in for a user-supplied mTLS client certificate callback: returns
+# fake (cert_bytes, key_bytes) in the shape google-auth expects, so the
+# mTLS code paths can be exercised without real credentials.
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SnapshotsClient._get_default_mtls_endpoint(None) is None + assert SnapshotsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SnapshotsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SnapshotsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SnapshotsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SnapshotsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + SnapshotsClient, +]) +def test_snapshots_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SnapshotsRestTransport, "rest"), +]) +def test_snapshots_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) 
+ + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + SnapshotsClient, +]) +def test_snapshots_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_snapshots_client_get_transport_class(): + transport = SnapshotsClient.get_transport_class() + available_transports = [ + transports.SnapshotsRestTransport, + ] + assert transport in available_transports + + transport = SnapshotsClient.get_transport_class("rest") + assert transport == transports.SnapshotsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SnapshotsClient, transports.SnapshotsRestTransport, "rest"), +]) +@mock.patch.object(SnapshotsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SnapshotsClient)) +def test_snapshots_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(SnapshotsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SnapshotsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SnapshotsClient, transports.SnapshotsRestTransport, "rest", "true"), + (SnapshotsClient, transports.SnapshotsRestTransport, "rest", "false"), +]) +@mock.patch.object(SnapshotsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SnapshotsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_snapshots_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SnapshotsClient, transports.SnapshotsRestTransport, "rest"), +]) +def test_snapshots_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SnapshotsClient, transports.SnapshotsRestTransport, "rest"), +]) +def test_snapshots_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "snapshot": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "snapshot": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "snapshot": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + snapshot='snapshot_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/snapshots/{snapshot}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteSnapshotRequest(), + project='project_value', + snapshot='snapshot_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "snapshot": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Snapshot( + auto_created=True, + chain_name='chain_name_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + disk_size_gb=1261, + download_bytes=1502, + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + license_codes=[1360], + licenses=['licenses_value'], + location_hint='location_hint_value', + name='name_value', + satisfies_pzs=True, + self_link='self_link_value', + source_disk='source_disk_value', + source_disk_id='source_disk_id_value', + status='status_value', + storage_bytes=1403, + storage_bytes_status='storage_bytes_status_value', + storage_locations=['storage_locations_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Snapshot.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Snapshot) + assert response.auto_created is True + assert response.chain_name == 'chain_name_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.disk_size_gb == 1261 + assert response.download_bytes == 1502 + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.license_codes == [1360] + assert response.licenses == ['licenses_value'] + assert response.location_hint == 'location_hint_value' + assert response.name == 'name_value' + assert response.satisfies_pzs is True + assert response.self_link == 'self_link_value' + assert response.source_disk == 'source_disk_value' + assert response.source_disk_id == 'source_disk_id_value' + assert response.status == 'status_value' + assert response.storage_bytes == 1403 + assert response.storage_bytes_status == 'storage_bytes_status_value' + assert response.storage_locations == ['storage_locations_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "snapshot": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Snapshot() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Snapshot.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "snapshot": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + snapshot='snapshot_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/snapshots/{snapshot}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetSnapshotRequest(), + project='project_value', + snapshot='snapshot_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicySnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicySnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/snapshots/{resource}/getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_iam_policy( + compute.GetIamPolicySnapshotRequest(), + project='project_value', + resource='resource_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListSnapshotsRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SnapshotList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SnapshotList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListSnapshotsRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SnapshotList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SnapshotList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/snapshots" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListSnapshotsRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SnapshotList( + items=[ + compute.Snapshot(), + compute.Snapshot(), + compute.Snapshot(), + ], + next_page_token='abc', + ), + compute.SnapshotList( + items=[], + next_page_token='def', + ), + compute.SnapshotList( + items=[ + compute.Snapshot(), + ], + next_page_token='ghi', + ), + compute.SnapshotList( + items=[ + compute.Snapshot(), + compute.Snapshot(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SnapshotList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Snapshot) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicySnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy 
transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicySnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_policy_request_resource"] = compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/snapshots/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicySnapshotRequest(), + project='project_value', + resource='resource_value', + global_set_policy_request_resource=compute.GlobalSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_labels(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["global_set_labels_request_resource"] = compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/snapshots/{resource}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsSnapshotRequest(), + project='project_value', + resource='resource_value', + global_set_labels_request_resource=compute.GlobalSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsSnapshotRequest): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "resource": "sample2"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "resource": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/snapshots/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsSnapshotRequest(), + project='project_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.SnapshotsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SnapshotsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SnapshotsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SnapshotsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SnapshotsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SnapshotsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = SnapshotsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.SnapshotsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_snapshots_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SnapshotsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_snapshots_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.snapshots.transports.SnapshotsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.SnapshotsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'get_iam_policy', + 'list', + 'set_iam_policy', + 'set_labels', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_snapshots_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.snapshots.transports.SnapshotsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SnapshotsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_snapshots_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.snapshots.transports.SnapshotsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SnapshotsTransport() + adc.assert_called_once() + + +def test_snapshots_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SnapshotsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_snapshots_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.SnapshotsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_snapshots_host_no_port(): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_snapshots_host_with_port(): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + 
actual = SnapshotsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = SnapshotsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = SnapshotsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = SnapshotsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = SnapshotsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SnapshotsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = SnapshotsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = SnapshotsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SnapshotsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = SnapshotsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = SnapshotsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SnapshotsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = SnapshotsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = SnapshotsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = SnapshotsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.SnapshotsTransport, '_prep_wrapped_messages') as prep: + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.SnapshotsTransport, '_prep_wrapped_messages') as prep: + transport_class = SnapshotsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = SnapshotsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_ssl_certificates.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_ssl_certificates.py new file mode 100644 index 000000000..fdfcdf482 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_ssl_certificates.py @@ -0,0 +1,1426 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.ssl_certificates import SslCertificatesClient +from google.cloud.compute_v1.services.ssl_certificates import pagers +from google.cloud.compute_v1.services.ssl_certificates import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SslCertificatesClient._get_default_mtls_endpoint(None) is None + assert SslCertificatesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SslCertificatesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SslCertificatesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SslCertificatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SslCertificatesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + SslCertificatesClient, +]) +def test_ssl_certificates_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SslCertificatesRestTransport, "rest"), +]) +def test_ssl_certificates_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + SslCertificatesClient, +]) +def test_ssl_certificates_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_ssl_certificates_client_get_transport_class(): + transport = SslCertificatesClient.get_transport_class() + available_transports = [ + transports.SslCertificatesRestTransport, + ] + assert transport in available_transports + + transport = SslCertificatesClient.get_transport_class("rest") + assert transport == transports.SslCertificatesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"), +]) +@mock.patch.object(SslCertificatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SslCertificatesClient)) +def test_ssl_certificates_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(SslCertificatesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SslCertificatesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SslCertificatesClient, transports.SslCertificatesRestTransport, "rest", "true"), + (SslCertificatesClient, transports.SslCertificatesRestTransport, "rest", "false"), +]) +@mock.patch.object(SslCertificatesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SslCertificatesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_ssl_certificates_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"), +]) +def test_ssl_certificates_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"), +]) +def test_ssl_certificates_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListSslCertificatesRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SslCertificateAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificateAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListSslCertificatesRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslCertificateAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificateAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/sslCertificates" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListSslCertificatesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SslCertificateAggregatedList( + items={ + 'a':compute.SslCertificatesScopedList(), + 'b':compute.SslCertificatesScopedList(), + 'c':compute.SslCertificatesScopedList(), + }, + next_page_token='abc', + ), + compute.SslCertificateAggregatedList( + items={}, + next_page_token='def', + ), + compute.SslCertificateAggregatedList( + items={ + 'g':compute.SslCertificatesScopedList(), + }, + next_page_token='ghi', + ), + compute.SslCertificateAggregatedList( + items={ + 'h':compute.SslCertificatesScopedList(), + 'i':compute.SslCertificatesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SslCertificateAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.SslCertificatesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert 
tuple(type(t) for t in result) == (str, compute.SslCertificatesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.SslCertificatesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteSslCertificateRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_certificate": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the 
response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteSslCertificateRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_certificate": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete(request)  # HTTP 400 must surface as core_exceptions.BadRequest
+
+
+def test_delete_rest_from_dict():
+    test_delete_rest(request_type=dict)  # a plain dict is accepted anywhere a request proto is
+
+
+def test_delete_rest_flattened(transport: str = 'rest'):
+    client = SslCertificatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = compute.Operation()
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        json_return_value = compute.Operation.to_json(return_value)
+
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {"project": "sample1", "ssl_certificate": "sample2"}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            project='project_value',
+            ssl_certificate='ssl_certificate_value',
+        )
+        mock_args.update(sample_request)  # path fields are overwritten with transcodable sample values
+        client.delete(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}" % client.transport._host, args[1])  # request URL must match the transcoded http rule
+
+
+def test_delete_rest_flattened_error(transport: str = 'rest'):
+    client = SslCertificatesClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteSslCertificateRequest(), + project='project_value', + ssl_certificate='ssl_certificate_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetSslCertificateRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_certificate": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslCertificate( + certificate='certificate_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + expire_time='expire_time_value', + id=205, + kind='kind_value', + name='name_value', + private_key='private_key_value', + region='region_value', + self_link='self_link_value', + subject_alternative_names=['subject_alternative_names_value'], + type_='type__value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificate.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.SslCertificate) + assert response.certificate == 'certificate_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.expire_time == 'expire_time_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.private_key == 'private_key_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.subject_alternative_names == ['subject_alternative_names_value'] + assert response.type_ == 'type__value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetSslCertificateRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_certificate": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SslCertificate() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificate.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "ssl_certificate": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ssl_certificate='ssl_certificate_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetSslCertificateRequest(), + project='project_value', + ssl_certificate='ssl_certificate_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertSslCertificateRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["ssl_certificate_resource"] = compute.SslCertificate(certificate='certificate_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertSslCertificateRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["ssl_certificate_resource"] = compute.SslCertificate(certificate='certificate_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ssl_certificate_resource=compute.SslCertificate(certificate='certificate_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslCertificates" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertSslCertificateRequest(), + project='project_value', + ssl_certificate_resource=compute.SslCertificate(certificate='certificate_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListSslCertificatesRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslCertificateList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificateList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListSslCertificatesRequest): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslCertificateList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslCertificateList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslCertificates" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListSslCertificatesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SslCertificateList( + items=[ + compute.SslCertificate(), + compute.SslCertificate(), + compute.SslCertificate(), + ], + next_page_token='abc', + ), + compute.SslCertificateList( + items=[], + next_page_token='def', + ), + compute.SslCertificateList( + items=[ + compute.SslCertificate(), + ], + next_page_token='ghi', + ), + compute.SslCertificateList( + items=[ + compute.SslCertificate(), + compute.SslCertificate(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SslCertificateList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.SslCertificate) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+    transport = transports.SslCertificatesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = SslCertificatesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.SslCertificatesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = SslCertificatesClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.SslCertificatesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = SslCertificatesClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+    transport = transports.SslCertificatesRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    client = SslCertificatesClient(transport=transport)
+    assert client.transport is transport  # identity check: the exact instance is reused, not copied
+
+
+@pytest.mark.parametrize("transport_class", [
+    transports.SslCertificatesRestTransport,
+])
+def test_transport_adc(transport_class):
+    # Test default credentials are used if not provided.
+    with mock.patch.object(google.auth, 'default') as adc:
+        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+        transport_class()
+        adc.assert_called_once()  # constructing with no credentials must fall back to ADC
+
+
+def test_ssl_certificates_base_transport_error():
+    # Passing both a credentials object and credentials_file should raise an error
+    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+        transport = transports.SslCertificatesTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+            credentials_file="credentials.json"
+        )
+
+
+def test_ssl_certificates_base_transport():
+    # Instantiate the base transport.
+    with mock.patch('google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport.__init__') as Transport:
+        Transport.return_value = None
+        transport = transports.SslCertificatesTransport(
+            credentials=ga_credentials.AnonymousCredentials(),
+        )
+
+    # Every method on the transport should just blindly
+    # raise NotImplementedError.
+    methods = (
+        'aggregated_list',
+        'delete',
+        'get',
+        'insert',
+        'list',
+    )
+    for method in methods:
+        with mock.patch.object
'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_ssl_certificates_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SslCertificatesTransport() + adc.assert_called_once() + + +def test_ssl_certificates_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SslCertificatesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_ssl_certificates_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.SslCertificatesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_ssl_certificates_host_no_port(): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_ssl_certificates_host_with_port(): + client = SslCertificatesClient( + credentials=ga_credentials.AnonymousCredentials(), + 
client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = SslCertificatesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = SslCertificatesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = SslCertificatesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = SslCertificatesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = SslCertificatesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SslCertificatesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = SslCertificatesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = SslCertificatesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+    actual = SslCertificatesClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    project = "cuttlefish"
+    expected = "projects/{project}".format(project=project, )
+    actual = SslCertificatesClient.common_project_path(project)
+    assert expected == actual  # helper must render the canonical resource-name template
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "mussel",
+    }
+    path = SslCertificatesClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SslCertificatesClient.parse_common_project_path(path)
+    assert expected == actual  # parse(build(x)) == x round-trip
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = SslCertificatesClient.common_location_path(project, location)
+    assert expected == actual  # helper must render the canonical resource-name template
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = SslCertificatesClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = SslCertificatesClient.parse_common_location_path(path)
+    assert expected == actual  # parse(build(x)) == x round-trip
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.SslCertificatesTransport, '_prep_wrapped_messages') as prep:
+        client = SslCertificatesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)  # client ctor must forward client_info to the transport
+
+    with mock.patch.object(transports.SslCertificatesTransport, '_prep_wrapped_messages') as prep:
+        transport_class = SslCertificatesClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)  # same contract when the transport is built directly
+
+
+def test_transport_close():
+    transports = {  # NOTE(review): this local dict shadows the imported `transports` module inside this test
+        "rest": "_session",
+    }
+
+    for transport, close_name in transports.items():
+        client = SslCertificatesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()  # close must not run while the context is still open
+            close.assert_called_once()  # exiting the context closes the underlying session exactly once
+
+def test_client_ctx():
+    transports = [  # NOTE(review): shadows the imported `transports` module, as above
+        'rest',
+    ]
+    for transport in transports:
+        client = SslCertificatesClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_ssl_policies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_ssl_policies.py new file mode 100644 index 000000000..5b0850e77 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_ssl_policies.py @@ -0,0 +1,1499 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.ssl_policies import SslPoliciesClient +from google.cloud.compute_v1.services.ssl_policies import pagers +from google.cloud.compute_v1.services.ssl_policies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SslPoliciesClient._get_default_mtls_endpoint(None) is None + assert SslPoliciesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SslPoliciesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SslPoliciesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SslPoliciesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SslPoliciesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + SslPoliciesClient, +]) +def test_ssl_policies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SslPoliciesRestTransport, "rest"), +]) +def test_ssl_policies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + SslPoliciesClient, +]) +def test_ssl_policies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_ssl_policies_client_get_transport_class(): + transport = SslPoliciesClient.get_transport_class() + available_transports = [ + transports.SslPoliciesRestTransport, + ] + assert transport in available_transports + + transport = SslPoliciesClient.get_transport_class("rest") + assert transport == transports.SslPoliciesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SslPoliciesClient, transports.SslPoliciesRestTransport, "rest"), +]) +@mock.patch.object(SslPoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SslPoliciesClient)) +def test_ssl_policies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(SslPoliciesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SslPoliciesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SslPoliciesClient, transports.SslPoliciesRestTransport, "rest", "true"), + (SslPoliciesClient, transports.SslPoliciesRestTransport, "rest", "false"), +]) +@mock.patch.object(SslPoliciesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SslPoliciesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_ssl_policies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SslPoliciesClient, transports.SslPoliciesRestTransport, "rest"), +]) +def test_ssl_policies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SslPoliciesClient, transports.SslPoliciesRestTransport, "rest"), +]) +def test_ssl_policies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "ssl_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ssl_policy='ssl_policy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteSslPolicyRequest(), + project='project_value', + ssl_policy='ssl_policy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslPolicy( + creation_timestamp='creation_timestamp_value', + custom_features=['custom_features_value'], + description='description_value', + enabled_features=['enabled_features_value'], + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + min_tls_version='min_tls_version_value', + name='name_value', + profile='profile_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslPolicy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.SslPolicy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.custom_features == ['custom_features_value'] + assert response.description == 'description_value' + assert response.enabled_features == ['enabled_features_value'] + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.min_tls_version == 'min_tls_version_value' + assert response.name == 'name_value' + assert response.profile == 'profile_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_policy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SslPolicy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslPolicy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "ssl_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ssl_policy='ssl_policy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetSslPolicyRequest(), + project='project_value', + ssl_policy='ssl_policy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["ssl_policy_resource"] = compute.SslPolicy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["ssl_policy_resource"] = compute.SslPolicy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ssl_policy_resource=compute.SslPolicy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslPolicies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.insert( + compute.InsertSslPolicyRequest(), + project='project_value', + ssl_policy_resource=compute.SslPolicy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListSslPoliciesRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslPoliciesList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslPoliciesList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListSslPoliciesRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslPoliciesList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslPoliciesList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslPolicies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListSslPoliciesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SslPoliciesList( + items=[ + compute.SslPolicy(), + compute.SslPolicy(), + compute.SslPolicy(), + ], + next_page_token='abc', + ), + compute.SslPoliciesList( + items=[], + next_page_token='def', + ), + compute.SslPoliciesList( + items=[ + compute.SslPolicy(), + ], + next_page_token='ghi', + ), + compute.SslPoliciesList( + items=[ + compute.SslPolicy(), + compute.SslPolicy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SslPoliciesList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.SslPolicy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_available_features_rest(transport: str = 'rest', request_type=compute.ListAvailableFeaturesSslPoliciesRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, 
+ ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslPoliciesListAvailableFeaturesResponse( + features=['features_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslPoliciesListAvailableFeaturesResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_available_features(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.SslPoliciesListAvailableFeaturesResponse) + assert response.features == ['features_value'] + + +def test_list_available_features_rest_bad_request(transport: str = 'rest', request_type=compute.ListAvailableFeaturesSslPoliciesRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_available_features(request) + + +def test_list_available_features_rest_from_dict(): + test_list_available_features_rest(request_type=dict) + + +def test_list_available_features_rest_flattened(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SslPoliciesListAvailableFeaturesResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SslPoliciesListAvailableFeaturesResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list_available_features(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslPolicies/listAvailableFeatures" % client.transport._host, args[1]) + + +def test_list_available_features_rest_flattened_error(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_available_features( + compute.ListAvailableFeaturesSslPoliciesRequest(), + project='project_value', + ) + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_policy": "sample2"} + request_init["ssl_policy_resource"] = compute.SslPolicy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchSslPolicyRequest): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "ssl_policy": "sample2"} + request_init["ssl_policy_resource"] = compute.SslPolicy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "ssl_policy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ssl_policy='ssl_policy_value', + ssl_policy_resource=compute.SslPolicy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchSslPolicyRequest(), + project='project_value', + ssl_policy='ssl_policy_value', + ssl_policy_resource=compute.SslPolicy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.SslPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SslPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SslPoliciesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SslPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SslPoliciesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.SslPoliciesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = SslPoliciesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.SslPoliciesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_ssl_policies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SslPoliciesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_ssl_policies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.ssl_policies.transports.SslPoliciesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.SslPoliciesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'list_available_features', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_ssl_policies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.ssl_policies.transports.SslPoliciesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SslPoliciesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_ssl_policies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.ssl_policies.transports.SslPoliciesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SslPoliciesTransport() + adc.assert_called_once() + + +def test_ssl_policies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SslPoliciesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_ssl_policies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.SslPoliciesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_ssl_policies_host_no_port(): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_ssl_policies_host_with_port(): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = SslPoliciesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = SslPoliciesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SslPoliciesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = SslPoliciesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = SslPoliciesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SslPoliciesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = SslPoliciesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = SslPoliciesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SslPoliciesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = SslPoliciesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = SslPoliciesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SslPoliciesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = SslPoliciesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = SslPoliciesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = SslPoliciesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.SslPoliciesTransport, '_prep_wrapped_messages') as prep: + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.SslPoliciesTransport, '_prep_wrapped_messages') as prep: + transport_class = SslPoliciesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = SslPoliciesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_subnetworks.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_subnetworks.py new file mode 100644 index 000000000..b6cf5d050 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_subnetworks.py @@ -0,0 +1,2439 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.subnetworks import SubnetworksClient +from google.cloud.compute_v1.services.subnetworks import pagers +from google.cloud.compute_v1.services.subnetworks import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert SubnetworksClient._get_default_mtls_endpoint(None) is None + assert SubnetworksClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert SubnetworksClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert SubnetworksClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert SubnetworksClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert SubnetworksClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + SubnetworksClient, +]) +def test_subnetworks_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.SubnetworksRestTransport, "rest"), +]) +def test_subnetworks_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + SubnetworksClient, +]) +def test_subnetworks_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_subnetworks_client_get_transport_class(): + transport = SubnetworksClient.get_transport_class() + available_transports = [ + transports.SubnetworksRestTransport, + ] + assert transport in available_transports + + transport = SubnetworksClient.get_transport_class("rest") + assert transport == transports.SubnetworksRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SubnetworksClient, transports.SubnetworksRestTransport, "rest"), +]) +@mock.patch.object(SubnetworksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubnetworksClient)) +def test_subnetworks_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(SubnetworksClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(SubnetworksClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (SubnetworksClient, transports.SubnetworksRestTransport, "rest", "true"), + (SubnetworksClient, transports.SubnetworksRestTransport, "rest", "false"), +]) +@mock.patch.object(SubnetworksClient, "DEFAULT_ENDPOINT", modify_default_endpoint(SubnetworksClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_subnetworks_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SubnetworksClient, transports.SubnetworksRestTransport, "rest"), +]) +def test_subnetworks_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (SubnetworksClient, transports.SubnetworksRestTransport, "rest"), +]) +def test_subnetworks_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListSubnetworksRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SubnetworkAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SubnetworkAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListSubnetworksRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SubnetworkAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SubnetworkAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/subnetworks" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListSubnetworksRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SubnetworkAggregatedList( + items={ + 'a':compute.SubnetworksScopedList(), + 'b':compute.SubnetworksScopedList(), + 'c':compute.SubnetworksScopedList(), + }, + next_page_token='abc', + ), + compute.SubnetworkAggregatedList( + items={}, + next_page_token='def', + ), + compute.SubnetworkAggregatedList( + items={ + 'g':compute.SubnetworksScopedList(), + }, + next_page_token='ghi', + ), + compute.SubnetworkAggregatedList( + items={ + 'h':compute.SubnetworksScopedList(), + 'i':compute.SubnetworksScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SubnetworkAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.SubnetworksScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.SubnetworksScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.SubnetworksScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+    assert len(req.mock_calls) == 1
+    _, args, _ = req.mock_calls[0]
+    assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}" % client.transport._host, args[1])
+
+
+def test_delete_rest_flattened_error(transport: str = 'rest'):
+    client = SubnetworksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Passing both a populated request object AND flattened keyword
+    # fields is ambiguous, so the client must raise ValueError.
+    with pytest.raises(ValueError):
+        client.delete(
+            compute.DeleteSubnetworkRequest(),
+            project='project_value',
+            region='region_value',
+            subnetwork='subnetwork_value',
+        )
+
+
+def test_expand_ip_cidr_range_rest(transport: str = 'rest', request_type=compute.ExpandIpCidrRangeSubnetworkRequest):
+    client = SubnetworksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Build a request dict that satisfies the REST URL transcoding rules.
+    request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"}
+    request_init["subnetworks_expand_ip_cidr_range_request_resource"] = compute.SubnetworksExpandIpCidrRangeRequest(ip_cidr_range='ip_cidr_range_value')
+    request = request_type(request_init)
+
+    # Stub requests.Session.request so no real HTTP call is made.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.expand_ip_cidr_range(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_expand_ip_cidr_range_rest_bad_request(transport: str = 'rest', request_type=compute.ExpandIpCidrRangeSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request_init["subnetworks_expand_ip_cidr_range_request_resource"] = compute.SubnetworksExpandIpCidrRangeRequest(ip_cidr_range='ip_cidr_range_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.expand_ip_cidr_range(request) + + +def test_expand_ip_cidr_range_rest_from_dict(): + test_expand_ip_cidr_range_rest(request_type=dict) + + +def test_expand_ip_cidr_range_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + subnetworks_expand_ip_cidr_range_request_resource=compute.SubnetworksExpandIpCidrRangeRequest(ip_cidr_range='ip_cidr_range_value'), + ) + mock_args.update(sample_request) + client.expand_ip_cidr_range(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+    assert len(req.mock_calls) == 1
+    _, args, _ = req.mock_calls[0]
+    assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}/expandIpCidrRange" % client.transport._host, args[1])
+
+
+def test_expand_ip_cidr_range_rest_flattened_error(transport: str = 'rest'):
+    client = SubnetworksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Passing both a populated request object AND flattened keyword
+    # fields is ambiguous, so the client must raise ValueError.
+    with pytest.raises(ValueError):
+        client.expand_ip_cidr_range(
+            compute.ExpandIpCidrRangeSubnetworkRequest(),
+            project='project_value',
+            region='region_value',
+            subnetwork='subnetwork_value',
+            subnetworks_expand_ip_cidr_range_request_resource=compute.SubnetworksExpandIpCidrRangeRequest(ip_cidr_range='ip_cidr_range_value'),
+        )
+
+
+def test_get_rest(transport: str = 'rest', request_type=compute.GetSubnetworkRequest):
+    client = SubnetworksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Build a request dict that satisfies the REST URL transcoding rules.
+    request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"}
+    request = request_type(request_init)
+
+    # Stub requests.Session.request so no real HTTP call is made.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+ return_value = compute.Subnetwork( + creation_timestamp='creation_timestamp_value', + description='description_value', + enable_flow_logs=True, + external_ipv6_prefix='external_ipv6_prefix_value', + fingerprint='fingerprint_value', + gateway_address='gateway_address_value', + id=205, + ip_cidr_range='ip_cidr_range_value', + ipv6_access_type='ipv6_access_type_value', + ipv6_cidr_range='ipv6_cidr_range_value', + kind='kind_value', + name='name_value', + network='network_value', + private_ip_google_access=True, + private_ipv6_google_access='private_ipv6_google_access_value', + purpose='purpose_value', + region='region_value', + role='role_value', + self_link='self_link_value', + stack_type='stack_type_value', + state='state_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Subnetwork.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Subnetwork) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.enable_flow_logs is True + assert response.external_ipv6_prefix == 'external_ipv6_prefix_value' + assert response.fingerprint == 'fingerprint_value' + assert response.gateway_address == 'gateway_address_value' + assert response.id == 205 + assert response.ip_cidr_range == 'ip_cidr_range_value' + assert response.ipv6_access_type == 'ipv6_access_type_value' + assert response.ipv6_cidr_range == 'ipv6_cidr_range_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.private_ip_google_access is True + assert response.private_ipv6_google_access == 'private_ipv6_google_access_value' + assert response.purpose == 'purpose_value' + assert response.region == 'region_value' + assert response.role == 'role_value' + assert response.self_link == 'self_link_value' + assert response.stack_type == 'stack_type_value' + assert response.state == 'state_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Subnetwork() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Subnetwork.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetSubnetworkRequest(), + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + ) + + +def test_get_iam_policy_rest(transport: str = 'rest', request_type=compute.GetIamPolicySubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.GetIamPolicySubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_iam_policy(request) + + +def test_get_iam_policy_rest_from_dict(): + test_get_iam_policy_rest(request_type=dict) + + +def test_get_iam_policy_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + ) + mock_args.update(sample_request) + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+    assert len(req.mock_calls) == 1
+    _, args, _ = req.mock_calls[0]
+    assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{resource}/getIamPolicy" % client.transport._host, args[1])
+
+
+def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'):
+    client = SubnetworksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Passing both a populated request object AND flattened keyword
+    # fields is ambiguous, so the client must raise ValueError.
+    with pytest.raises(ValueError):
+        client.get_iam_policy(
+            compute.GetIamPolicySubnetworkRequest(),
+            project='project_value',
+            region='region_value',
+            resource='resource_value',
+        )
+
+
+def test_insert_rest(transport: str = 'rest', request_type=compute.InsertSubnetworkRequest):
+    client = SubnetworksClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Build a request dict that satisfies the REST URL transcoding rules.
+    request_init = {"project": "sample1", "region": "sample2"}
+    request_init["subnetwork_resource"] = compute.Subnetwork(creation_timestamp='creation_timestamp_value')
+    request = request_type(request_init)
+
+    # Stub requests.Session.request so no real HTTP call is made.
+    with mock.patch.object(Session, 'request') as req:
+        # Designate an appropriate value for the returned response.
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["subnetwork_resource"] = compute.Subnetwork(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + subnetwork_resource=compute.Subnetwork(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertSubnetworkRequest(), + project='project_value', + region='region_value', + subnetwork_resource=compute.Subnetwork(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListSubnetworksRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.SubnetworkList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SubnetworkList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListSubnetworksRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.SubnetworkList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.SubnetworkList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListSubnetworksRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.SubnetworkList( + items=[ + compute.Subnetwork(), + compute.Subnetwork(), + compute.Subnetwork(), + ], + next_page_token='abc', + ), + compute.SubnetworkList( + items=[], + next_page_token='def', + ), + compute.SubnetworkList( + items=[ + compute.Subnetwork(), + ], + next_page_token='ghi', + ), + compute.SubnetworkList( + items=[ + compute.Subnetwork(), + compute.Subnetwork(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.SubnetworkList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Subnetwork) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_list_usable_rest(transport: str = 'rest', request_type=compute.ListUsableSubnetworksRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.UsableSubnetworksAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UsableSubnetworksAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_usable(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListUsablePager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_usable_rest_bad_request(transport: str = 'rest', request_type=compute.ListUsableSubnetworksRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_usable(request) + + +def test_list_usable_rest_from_dict(): + test_list_usable_rest(request_type=dict) + + +def test_list_usable_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.UsableSubnetworksAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UsableSubnetworksAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list_usable(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/subnetworks/listUsable" % client.transport._host, args[1]) + + +def test_list_usable_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_usable( + compute.ListUsableSubnetworksRequest(), + project='project_value', + ) + + +def test_list_usable_rest_pager(): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.UsableSubnetworksAggregatedList( + items=[ + compute.UsableSubnetwork(), + compute.UsableSubnetwork(), + compute.UsableSubnetwork(), + ], + next_page_token='abc', + ), + compute.UsableSubnetworksAggregatedList( + items=[], + next_page_token='def', + ), + compute.UsableSubnetworksAggregatedList( + items=[ + compute.UsableSubnetwork(), + ], + next_page_token='ghi', + ), + compute.UsableSubnetworksAggregatedList( + items=[ + compute.UsableSubnetwork(), + compute.UsableSubnetwork(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.UsableSubnetworksAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list_usable(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.UsableSubnetwork) + for i in results) + + pages = list(client.list_usable(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request_init["subnetwork_resource"] = compute.Subnetwork(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within 
the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request_init["subnetwork_resource"] = compute.Subnetwork(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + subnetwork_resource=compute.Subnetwork(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchSubnetworkRequest(), + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + subnetwork_resource=compute.Subnetwork(creation_timestamp='creation_timestamp_value'), + ) + + +def test_set_iam_policy_rest(transport: str = 'rest', request_type=compute.SetIamPolicySubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Policy( + etag='etag_value', + iam_owned=True, + version=774, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Policy) + assert response.etag == 'etag_value' + assert response.iam_owned is True + assert response.version == 774 + + +def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetIamPolicySubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_policy_request_resource"] = compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_iam_policy(request) + + +def test_set_iam_policy_rest_from_dict(): + test_set_iam_policy_rest(request_type=dict) + + +def test_set_iam_policy_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Policy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Policy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + mock_args.update(sample_request) + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{resource}/setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.set_iam_policy( + compute.SetIamPolicySubnetworkRequest(), + project='project_value', + region='region_value', + resource='resource_value', + region_set_policy_request_resource=compute.RegionSetPolicyRequest(bindings=[compute.Binding(binding_id='binding_id_value')]), + ) + + +def test_set_private_ip_google_access_rest(transport: str = 'rest', request_type=compute.SetPrivateIpGoogleAccessSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request_init["subnetworks_set_private_ip_google_access_request_resource"] = compute.SubnetworksSetPrivateIpGoogleAccessRequest(private_ip_google_access=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_private_ip_google_access(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_private_ip_google_access_rest_bad_request(transport: str = 'rest', request_type=compute.SetPrivateIpGoogleAccessSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + request_init["subnetworks_set_private_ip_google_access_request_resource"] = compute.SubnetworksSetPrivateIpGoogleAccessRequest(private_ip_google_access=True) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_private_ip_google_access(request) + + +def test_set_private_ip_google_access_rest_from_dict(): + test_set_private_ip_google_access_rest(request_type=dict) + + +def test_set_private_ip_google_access_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "subnetwork": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + subnetworks_set_private_ip_google_access_request_resource=compute.SubnetworksSetPrivateIpGoogleAccessRequest(private_ip_google_access=True), + ) + mock_args.update(sample_request) + client.set_private_ip_google_access(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{subnetwork}/setPrivateIpGoogleAccess" % client.transport._host, args[1]) + + +def test_set_private_ip_google_access_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_private_ip_google_access( + compute.SetPrivateIpGoogleAccessSubnetworkRequest(), + project='project_value', + region='region_value', + subnetwork='subnetwork_value', + subnetworks_set_private_ip_google_access_request_resource=compute.SubnetworksSetPrivateIpGoogleAccessRequest(private_ip_google_access=True), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsSubnetworkRequest): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/subnetworks/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsSubnetworkRequest(), + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.SubnetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.SubnetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SubnetworksClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.SubnetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = SubnetworksClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.SubnetworksRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = SubnetworksClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.SubnetworksRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_subnetworks_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.SubnetworksTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_subnetworks_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.subnetworks.transports.SubnetworksTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.SubnetworksTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'expand_ip_cidr_range', + 'get', + 'get_iam_policy', + 'insert', + 'list', + 'list_usable', + 'patch', + 'set_iam_policy', + 'set_private_ip_google_access', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_subnetworks_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.subnetworks.transports.SubnetworksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SubnetworksTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_subnetworks_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.subnetworks.transports.SubnetworksTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.SubnetworksTransport() + adc.assert_called_once() + + +def test_subnetworks_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + SubnetworksClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_subnetworks_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.SubnetworksRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_subnetworks_host_no_port(): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_subnetworks_host_with_port(): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = SubnetworksClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = SubnetworksClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = SubnetworksClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = SubnetworksClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = SubnetworksClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = SubnetworksClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = SubnetworksClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = SubnetworksClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = SubnetworksClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = SubnetworksClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = SubnetworksClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = SubnetworksClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = SubnetworksClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = SubnetworksClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = SubnetworksClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.SubnetworksTransport, '_prep_wrapped_messages') as prep: + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.SubnetworksTransport, '_prep_wrapped_messages') as prep: + transport_class = SubnetworksClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = SubnetworksClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_grpc_proxies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_grpc_proxies.py new file mode 100644 index 000000000..a512efdca --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_grpc_proxies.py @@ -0,0 +1,1391 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+import mock

+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule

+from requests import Response
+from requests import Request
+from requests.sessions import Session

+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.target_grpc_proxies import TargetGrpcProxiesClient
+from google.cloud.compute_v1.services.target_grpc_proxies import pagers
+from google.cloud.compute_v1.services.target_grpc_proxies import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth


+def client_cert_source_callback():  # stand-in mTLS client-cert provider: returns fake (cert, key) PEM bytes
+    return b"cert bytes", b"key bytes"


+# If the client's DEFAULT_ENDPOINT were localhost, its derived mTLS endpoint would
+# be identical and the endpoint-autoswitch tests below could not observe a switch;
+# the helper defined next substitutes a distinct endpoint in that case.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetGrpcProxiesClient._get_default_mtls_endpoint(None) is None + assert TargetGrpcProxiesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetGrpcProxiesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetGrpcProxiesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetGrpcProxiesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetGrpcProxiesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetGrpcProxiesClient, +]) +def test_target_grpc_proxies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetGrpcProxiesRestTransport, "rest"), +]) +def test_target_grpc_proxies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetGrpcProxiesClient, +]) +def test_target_grpc_proxies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_grpc_proxies_client_get_transport_class(): + transport = TargetGrpcProxiesClient.get_transport_class() + available_transports = [ + transports.TargetGrpcProxiesRestTransport, + ] + assert transport in available_transports + + transport = TargetGrpcProxiesClient.get_transport_class("rest") + assert transport == transports.TargetGrpcProxiesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetGrpcProxiesClient, transports.TargetGrpcProxiesRestTransport, "rest"), +]) +@mock.patch.object(TargetGrpcProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetGrpcProxiesClient)) +def test_target_grpc_proxies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetGrpcProxiesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetGrpcProxiesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetGrpcProxiesClient, transports.TargetGrpcProxiesRestTransport, "rest", "true"), + (TargetGrpcProxiesClient, transports.TargetGrpcProxiesRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetGrpcProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetGrpcProxiesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_target_grpc_proxies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetGrpcProxiesClient, transports.TargetGrpcProxiesRestTransport, "rest"), +]) +def test_target_grpc_proxies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetGrpcProxiesClient, transports.TargetGrpcProxiesRestTransport, "rest"), +]) +def test_target_grpc_proxies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_grpc_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_grpc_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_grpc_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_grpc_proxy='target_grpc_proxy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetGrpcProxies/{target_grpc_proxy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetGrpcProxyRequest(), + project='project_value', + target_grpc_proxy='target_grpc_proxy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_grpc_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetGrpcProxy( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + self_link='self_link_value', + self_link_with_id='self_link_with_id_value', + url_map='url_map_value', + validate_for_proxyless=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetGrpcProxy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TargetGrpcProxy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.self_link == 'self_link_value' + assert response.self_link_with_id == 'self_link_with_id_value' + assert response.url_map == 'url_map_value' + assert response.validate_for_proxyless is True + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_grpc_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetGrpcProxy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetGrpcProxy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_grpc_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_grpc_proxy='target_grpc_proxy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetGrpcProxies/{target_grpc_proxy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetTargetGrpcProxyRequest(), + project='project_value', + target_grpc_proxy='target_grpc_proxy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_grpc_proxy_resource"] = compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_grpc_proxy_resource"] = compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_grpc_proxy_resource=compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetGrpcProxies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetGrpcProxyRequest(), + project='project_value', + target_grpc_proxy_resource=compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetGrpcProxiesRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetGrpcProxyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetGrpcProxyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetGrpcProxiesRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetGrpcProxyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetGrpcProxyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetGrpcProxies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetGrpcProxiesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetGrpcProxyList( + items=[ + compute.TargetGrpcProxy(), + compute.TargetGrpcProxy(), + compute.TargetGrpcProxy(), + ], + next_page_token='abc', + ), + compute.TargetGrpcProxyList( + items=[], + next_page_token='def', + ), + compute.TargetGrpcProxyList( + items=[ + compute.TargetGrpcProxy(), + ], + next_page_token='ghi', + ), + compute.TargetGrpcProxyList( + items=[ + compute.TargetGrpcProxy(), + compute.TargetGrpcProxy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetGrpcProxyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetGrpcProxy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_grpc_proxy": "sample2"} + request_init["target_grpc_proxy_resource"] = compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchTargetGrpcProxyRequest): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_grpc_proxy": "sample2"} + request_init["target_grpc_proxy_resource"] = compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_grpc_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_grpc_proxy='target_grpc_proxy_value', + target_grpc_proxy_resource=compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetGrpcProxies/{target_grpc_proxy}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchTargetGrpcProxyRequest(), + project='project_value', + target_grpc_proxy='target_grpc_proxy_value', + target_grpc_proxy_resource=compute.TargetGrpcProxy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetGrpcProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TargetGrpcProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetGrpcProxiesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetGrpcProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetGrpcProxiesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.TargetGrpcProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetGrpcProxiesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetGrpcProxiesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_grpc_proxies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetGrpcProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_grpc_proxies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.target_grpc_proxies.transports.TargetGrpcProxiesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetGrpcProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'patch', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_grpc_proxies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_grpc_proxies.transports.TargetGrpcProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetGrpcProxiesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_grpc_proxies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_grpc_proxies.transports.TargetGrpcProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetGrpcProxiesTransport() + adc.assert_called_once() + + +def test_target_grpc_proxies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetGrpcProxiesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_grpc_proxies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetGrpcProxiesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_grpc_proxies_host_no_port(): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_grpc_proxies_host_with_port(): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetGrpcProxiesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetGrpcProxiesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetGrpcProxiesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetGrpcProxiesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetGrpcProxiesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetGrpcProxiesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetGrpcProxiesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetGrpcProxiesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TargetGrpcProxiesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetGrpcProxiesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetGrpcProxiesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetGrpcProxiesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetGrpcProxiesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetGrpcProxiesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TargetGrpcProxiesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetGrpcProxiesTransport, '_prep_wrapped_messages') as prep: + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetGrpcProxiesTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetGrpcProxiesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetGrpcProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport 
+ ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_http_proxies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_http_proxies.py new file mode 100644 index 000000000..25423122a --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_http_proxies.py @@ -0,0 +1,1730 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.target_http_proxies import TargetHttpProxiesClient +from google.cloud.compute_v1.services.target_http_proxies import pagers +from google.cloud.compute_v1.services.target_http_proxies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetHttpProxiesClient._get_default_mtls_endpoint(None) is None + assert TargetHttpProxiesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetHttpProxiesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetHttpProxiesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetHttpProxiesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetHttpProxiesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetHttpProxiesClient, +]) +def test_target_http_proxies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetHttpProxiesRestTransport, "rest"), +]) +def test_target_http_proxies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetHttpProxiesClient, +]) +def test_target_http_proxies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_http_proxies_client_get_transport_class(): + transport = TargetHttpProxiesClient.get_transport_class() + available_transports = [ + transports.TargetHttpProxiesRestTransport, + ] + assert transport in available_transports + + transport = TargetHttpProxiesClient.get_transport_class("rest") + assert transport == transports.TargetHttpProxiesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetHttpProxiesClient, transports.TargetHttpProxiesRestTransport, "rest"), +]) +@mock.patch.object(TargetHttpProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetHttpProxiesClient)) +def test_target_http_proxies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetHttpProxiesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetHttpProxiesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetHttpProxiesClient, transports.TargetHttpProxiesRestTransport, "rest", "true"), + (TargetHttpProxiesClient, transports.TargetHttpProxiesRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetHttpProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetHttpProxiesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_target_http_proxies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetHttpProxiesClient, transports.TargetHttpProxiesRestTransport, "rest"), +]) +def test_target_http_proxies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetHttpProxiesClient, transports.TargetHttpProxiesRestTransport, "rest"), +]) +def test_target_http_proxies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListTargetHttpProxiesRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpProxyAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpProxyAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListTargetHttpProxiesRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpProxyAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpProxyAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/targetHttpProxies" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListTargetHttpProxiesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetHttpProxyAggregatedList( + items={ + 'a':compute.TargetHttpProxiesScopedList(), + 'b':compute.TargetHttpProxiesScopedList(), + 'c':compute.TargetHttpProxiesScopedList(), + }, + next_page_token='abc', + ), + compute.TargetHttpProxyAggregatedList( + items={}, + next_page_token='def', + ), + compute.TargetHttpProxyAggregatedList( + items={ + 'g':compute.TargetHttpProxiesScopedList(), + }, + next_page_token='ghi', + ), + compute.TargetHttpProxyAggregatedList( + items={ + 'h':compute.TargetHttpProxiesScopedList(), + 'i':compute.TargetHttpProxiesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetHttpProxyAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, 
response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.TargetHttpProxiesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.TargetHttpProxiesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.TargetHttpProxiesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_http_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_http_proxy='target_http_proxy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpProxies/{target_http_proxy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetHttpProxyRequest(), + project='project_value', + target_http_proxy='target_http_proxy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpProxy( + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + proxy_bind=True, + region='region_value', + self_link='self_link_value', + url_map='url_map_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpProxy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TargetHttpProxy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.proxy_bind is True + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.url_map == 'url_map_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpProxy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpProxy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_http_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_http_proxy='target_http_proxy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpProxies/{target_http_proxy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetTargetHttpProxyRequest(), + project='project_value', + target_http_proxy='target_http_proxy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_http_proxy_resource"] = compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_http_proxy_resource"] = compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_http_proxy_resource=compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpProxies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetHttpProxyRequest(), + project='project_value', + target_http_proxy_resource=compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetHttpProxiesRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpProxyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpProxyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetHttpProxiesRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpProxyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpProxyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpProxies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetHttpProxiesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetHttpProxyList( + items=[ + compute.TargetHttpProxy(), + compute.TargetHttpProxy(), + compute.TargetHttpProxy(), + ], + next_page_token='abc', + ), + compute.TargetHttpProxyList( + items=[], + next_page_token='def', + ), + compute.TargetHttpProxyList( + items=[ + compute.TargetHttpProxy(), + ], + next_page_token='ghi', + ), + compute.TargetHttpProxyList( + items=[ + compute.TargetHttpProxy(), + compute.TargetHttpProxy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetHttpProxyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetHttpProxy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request_init["target_http_proxy_resource"] = compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request_init["target_http_proxy_resource"] = compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_http_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_http_proxy='target_http_proxy_value', + target_http_proxy_resource=compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpProxies/{target_http_proxy}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchTargetHttpProxyRequest(), + project='project_value', + target_http_proxy='target_http_proxy_value', + target_http_proxy_resource=compute.TargetHttpProxy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_set_url_map_rest(transport: str = 'rest', request_type=compute.SetUrlMapTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_url_map(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_url_map_rest_bad_request(transport: str = 'rest', request_type=compute.SetUrlMapTargetHttpProxyRequest): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_http_proxy": "sample2"} + request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_url_map(request) + + +def test_set_url_map_rest_from_dict(): + test_set_url_map_rest(request_type=dict) + + +def test_set_url_map_rest_flattened(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_http_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_http_proxy='target_http_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + mock_args.update(sample_request) + client.set_url_map(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/targetHttpProxies/{target_http_proxy}/setUrlMap" % client.transport._host, args[1]) + + +def test_set_url_map_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_url_map( + compute.SetUrlMapTargetHttpProxyRequest(), + project='project_value', + target_http_proxy='target_http_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetHttpProxiesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetHttpProxiesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.TargetHttpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetHttpProxiesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetHttpProxiesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_http_proxies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetHttpProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_http_proxies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.target_http_proxies.transports.TargetHttpProxiesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetHttpProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'set_url_map', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_http_proxies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_http_proxies.transports.TargetHttpProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetHttpProxiesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_http_proxies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_http_proxies.transports.TargetHttpProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetHttpProxiesTransport() + adc.assert_called_once() + + +def test_target_http_proxies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetHttpProxiesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_http_proxies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetHttpProxiesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_http_proxies_host_no_port(): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_http_proxies_host_with_port(): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetHttpProxiesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetHttpProxiesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetHttpProxiesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetHttpProxiesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetHttpProxiesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetHttpProxiesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetHttpProxiesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetHttpProxiesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TargetHttpProxiesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetHttpProxiesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetHttpProxiesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetHttpProxiesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetHttpProxiesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetHttpProxiesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TargetHttpProxiesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetHttpProxiesTransport, '_prep_wrapped_messages') as prep: + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetHttpProxiesTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetHttpProxiesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetHttpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport 
+ ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_https_proxies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_https_proxies.py new file mode 100644 index 000000000..140ed5150 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_https_proxies.py @@ -0,0 +1,2202 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.target_https_proxies import TargetHttpsProxiesClient +from google.cloud.compute_v1.services.target_https_proxies import pagers +from google.cloud.compute_v1.services.target_https_proxies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetHttpsProxiesClient._get_default_mtls_endpoint(None) is None + assert TargetHttpsProxiesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetHttpsProxiesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetHttpsProxiesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetHttpsProxiesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetHttpsProxiesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetHttpsProxiesClient, +]) +def test_target_https_proxies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetHttpsProxiesRestTransport, "rest"), +]) +def test_target_https_proxies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetHttpsProxiesClient, +]) +def test_target_https_proxies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_https_proxies_client_get_transport_class(): + transport = TargetHttpsProxiesClient.get_transport_class() + available_transports = [ + transports.TargetHttpsProxiesRestTransport, + ] + assert transport in available_transports + + transport = TargetHttpsProxiesClient.get_transport_class("rest") + assert transport == transports.TargetHttpsProxiesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetHttpsProxiesClient, transports.TargetHttpsProxiesRestTransport, "rest"), +]) +@mock.patch.object(TargetHttpsProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetHttpsProxiesClient)) +def test_target_https_proxies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetHttpsProxiesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetHttpsProxiesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetHttpsProxiesClient, transports.TargetHttpsProxiesRestTransport, "rest", "true"), + (TargetHttpsProxiesClient, transports.TargetHttpsProxiesRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetHttpsProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetHttpsProxiesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_target_https_proxies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetHttpsProxiesClient, transports.TargetHttpsProxiesRestTransport, "rest"), +]) +def test_target_https_proxies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetHttpsProxiesClient, transports.TargetHttpsProxiesRestTransport, "rest"), +]) +def test_target_https_proxies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListTargetHttpsProxiesRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpsProxyAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxyAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListTargetHttpsProxiesRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpsProxyAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxyAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/targetHttpsProxies" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListTargetHttpsProxiesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetHttpsProxyAggregatedList( + items={ + 'a':compute.TargetHttpsProxiesScopedList(), + 'b':compute.TargetHttpsProxiesScopedList(), + 'c':compute.TargetHttpsProxiesScopedList(), + }, + next_page_token='abc', + ), + compute.TargetHttpsProxyAggregatedList( + items={}, + next_page_token='def', + ), + compute.TargetHttpsProxyAggregatedList( + items={ + 'g':compute.TargetHttpsProxiesScopedList(), + }, + next_page_token='ghi', + ), + compute.TargetHttpsProxyAggregatedList( + items={ + 'h':compute.TargetHttpsProxiesScopedList(), + 'i':compute.TargetHttpsProxiesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetHttpsProxyAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in 
zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.TargetHttpsProxiesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.TargetHttpsProxiesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.TargetHttpsProxiesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_https_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy='target_https_proxy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy='target_https_proxy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpsProxy( + authorization_policy='authorization_policy_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + fingerprint='fingerprint_value', + id=205, + kind='kind_value', + name='name_value', + proxy_bind=True, + quic_override='quic_override_value', + region='region_value', + self_link='self_link_value', + server_tls_policy='server_tls_policy_value', + ssl_certificates=['ssl_certificates_value'], + ssl_policy='ssl_policy_value', + url_map='url_map_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TargetHttpsProxy) + assert response.authorization_policy == 'authorization_policy_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.fingerprint == 'fingerprint_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.proxy_bind is True + assert response.quic_override == 'quic_override_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.server_tls_policy == 'server_tls_policy_value' + assert response.ssl_certificates == ['ssl_certificates_value'] + assert response.ssl_policy == 'ssl_policy_value' + assert response.url_map == 'url_map_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy 
transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpsProxy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_https_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy='target_https_proxy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy='target_https_proxy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_https_proxy_resource"] = compute.TargetHttpsProxy(authorization_policy='authorization_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_https_proxy_resource"] = compute.TargetHttpsProxy(authorization_policy='authorization_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy_resource=compute.TargetHttpsProxy(authorization_policy='authorization_policy_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpsProxies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy_resource=compute.TargetHttpsProxy(authorization_policy='authorization_policy_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetHttpsProxiesRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetHttpsProxyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetHttpsProxiesRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetHttpsProxyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetHttpsProxyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpsProxies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetHttpsProxiesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetHttpsProxyList( + items=[ + compute.TargetHttpsProxy(), + compute.TargetHttpsProxy(), + compute.TargetHttpsProxy(), + ], + next_page_token='abc', + ), + compute.TargetHttpsProxyList( + items=[], + next_page_token='def', + ), + compute.TargetHttpsProxyList( + items=[ + compute.TargetHttpsProxy(), + ], + next_page_token='ghi', + ), + compute.TargetHttpsProxyList( + items=[ + compute.TargetHttpsProxy(), + compute.TargetHttpsProxy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetHttpsProxyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetHttpsProxy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_patch_rest(transport: str = 'rest', request_type=compute.PatchTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["target_https_proxy_resource"] = compute.TargetHttpsProxy(authorization_policy='authorization_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.patch(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["target_https_proxy_resource"] = compute.TargetHttpsProxy(authorization_policy='authorization_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.patch(request) + + +def test_patch_rest_from_dict(): + test_patch_rest(request_type=dict) + + +def test_patch_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_https_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy='target_https_proxy_value', + target_https_proxy_resource=compute.TargetHttpsProxy(authorization_policy='authorization_policy_value'), + ) + mock_args.update(sample_request) + client.patch(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}" % client.transport._host, args[1]) + + +def test_patch_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.patch( + compute.PatchTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy='target_https_proxy_value', + target_https_proxy_resource=compute.TargetHttpsProxy(authorization_policy='authorization_policy_value'), + ) + + +def test_set_quic_override_rest(transport: str = 'rest', request_type=compute.SetQuicOverrideTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["target_https_proxies_set_quic_override_request_resource"] = compute.TargetHttpsProxiesSetQuicOverrideRequest(quic_override='quic_override_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_quic_override(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_quic_override_rest_bad_request(transport: str = 'rest', request_type=compute.SetQuicOverrideTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["target_https_proxies_set_quic_override_request_resource"] = compute.TargetHttpsProxiesSetQuicOverrideRequest(quic_override='quic_override_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_quic_override(request) + + +def test_set_quic_override_rest_from_dict(): + test_set_quic_override_rest(request_type=dict) + + +def test_set_quic_override_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_https_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy='target_https_proxy_value', + target_https_proxies_set_quic_override_request_resource=compute.TargetHttpsProxiesSetQuicOverrideRequest(quic_override='quic_override_value'), + ) + mock_args.update(sample_request) + client.set_quic_override(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}/setQuicOverride" % client.transport._host, args[1]) + + +def test_set_quic_override_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_quic_override( + compute.SetQuicOverrideTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy='target_https_proxy_value', + target_https_proxies_set_quic_override_request_resource=compute.TargetHttpsProxiesSetQuicOverrideRequest(quic_override='quic_override_value'), + ) + + +def test_set_ssl_certificates_rest(transport: str = 'rest', request_type=compute.SetSslCertificatesTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["target_https_proxies_set_ssl_certificates_request_resource"] = compute.TargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_ssl_certificates(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_ssl_certificates_rest_bad_request(transport: str = 'rest', request_type=compute.SetSslCertificatesTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["target_https_proxies_set_ssl_certificates_request_resource"] = compute.TargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_ssl_certificates(request) + + +def test_set_ssl_certificates_rest_from_dict(): + test_set_ssl_certificates_rest(request_type=dict) + + +def test_set_ssl_certificates_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_https_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy='target_https_proxy_value', + target_https_proxies_set_ssl_certificates_request_resource=compute.TargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']), + ) + mock_args.update(sample_request) + client.set_ssl_certificates(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/targetHttpsProxies/{target_https_proxy}/setSslCertificates" % client.transport._host, args[1]) + + +def test_set_ssl_certificates_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_ssl_certificates( + compute.SetSslCertificatesTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy='target_https_proxy_value', + target_https_proxies_set_ssl_certificates_request_resource=compute.TargetHttpsProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']), + ) + + +def test_set_ssl_policy_rest(transport: str = 'rest', request_type=compute.SetSslPolicyTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["ssl_policy_reference_resource"] = compute.SslPolicyReference(ssl_policy='ssl_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_ssl_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_ssl_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetSslPolicyTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["ssl_policy_reference_resource"] = compute.SslPolicyReference(ssl_policy='ssl_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_ssl_policy(request) + + +def test_set_ssl_policy_rest_from_dict(): + test_set_ssl_policy_rest(request_type=dict) + + +def test_set_ssl_policy_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_https_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy='target_https_proxy_value', + ssl_policy_reference_resource=compute.SslPolicyReference(ssl_policy='ssl_policy_value'), + ) + mock_args.update(sample_request) + client.set_ssl_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetHttpsProxies/{target_https_proxy}/setSslPolicy" % client.transport._host, args[1]) + + +def test_set_ssl_policy_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_ssl_policy( + compute.SetSslPolicyTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy='target_https_proxy_value', + ssl_policy_reference_resource=compute.SslPolicyReference(ssl_policy='ssl_policy_value'), + ) + + +def test_set_url_map_rest(transport: str = 'rest', request_type=compute.SetUrlMapTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_url_map(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_url_map_rest_bad_request(transport: str = 'rest', request_type=compute.SetUrlMapTargetHttpsProxyRequest): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_https_proxy": "sample2"} + request_init["url_map_reference_resource"] = compute.UrlMapReference(url_map='url_map_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_url_map(request) + + +def test_set_url_map_rest_from_dict(): + test_set_url_map_rest(request_type=dict) + + +def test_set_url_map_rest_flattened(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_https_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_https_proxy='target_https_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + mock_args.update(sample_request) + client.set_url_map(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/targetHttpsProxies/{target_https_proxy}/setUrlMap" % client.transport._host, args[1]) + + +def test_set_url_map_rest_flattened_error(transport: str = 'rest'): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_url_map( + compute.SetUrlMapTargetHttpsProxyRequest(), + project='project_value', + target_https_proxy='target_https_proxy_value', + url_map_reference_resource=compute.UrlMapReference(url_map='url_map_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetHttpsProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TargetHttpsProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetHttpsProxiesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetHttpsProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetHttpsProxiesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.TargetHttpsProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetHttpsProxiesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetHttpsProxiesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_https_proxies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetHttpsProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_https_proxies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.target_https_proxies.transports.TargetHttpsProxiesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetHttpsProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + 'patch', + 'set_quic_override', + 'set_ssl_certificates', + 'set_ssl_policy', + 'set_url_map', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_https_proxies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_https_proxies.transports.TargetHttpsProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetHttpsProxiesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_https_proxies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_https_proxies.transports.TargetHttpsProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetHttpsProxiesTransport() + adc.assert_called_once() + + +def test_target_https_proxies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetHttpsProxiesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_https_proxies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetHttpsProxiesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_https_proxies_host_no_port(): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_https_proxies_host_with_port(): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetHttpsProxiesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetHttpsProxiesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetHttpsProxiesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetHttpsProxiesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetHttpsProxiesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetHttpsProxiesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetHttpsProxiesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetHttpsProxiesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TargetHttpsProxiesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetHttpsProxiesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetHttpsProxiesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetHttpsProxiesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetHttpsProxiesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetHttpsProxiesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TargetHttpsProxiesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetHttpsProxiesTransport, '_prep_wrapped_messages') as prep: + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetHttpsProxiesTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetHttpsProxiesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetHttpsProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport + ) + # Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_instances.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_instances.py new file mode 100644 index 000000000..eed87a829 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_instances.py @@ -0,0 +1,1430 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.target_instances import TargetInstancesClient +from google.cloud.compute_v1.services.target_instances import pagers +from google.cloud.compute_v1.services.target_instances import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetInstancesClient._get_default_mtls_endpoint(None) is None + assert TargetInstancesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetInstancesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetInstancesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetInstancesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetInstancesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetInstancesClient, +]) +def test_target_instances_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetInstancesRestTransport, "rest"), +]) +def test_target_instances_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetInstancesClient, +]) +def test_target_instances_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_instances_client_get_transport_class(): + transport = TargetInstancesClient.get_transport_class() + available_transports = [ + transports.TargetInstancesRestTransport, + ] + assert transport in available_transports + + transport = TargetInstancesClient.get_transport_class("rest") + assert transport == transports.TargetInstancesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetInstancesClient, transports.TargetInstancesRestTransport, "rest"), +]) +@mock.patch.object(TargetInstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetInstancesClient)) +def test_target_instances_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetInstancesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetInstancesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetInstancesClient, transports.TargetInstancesRestTransport, "rest", "true"), + (TargetInstancesClient, transports.TargetInstancesRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetInstancesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetInstancesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_target_instances_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetInstancesClient, transports.TargetInstancesRestTransport, "rest"), +]) +def test_target_instances_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetInstancesClient, transports.TargetInstancesRestTransport, "rest"), +]) +def test_target_instances_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListTargetInstancesRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetInstanceAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetInstanceAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListTargetInstancesRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetInstanceAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetInstanceAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/targetInstances" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListTargetInstancesRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetInstanceAggregatedList( + items={ + 'a':compute.TargetInstancesScopedList(), + 'b':compute.TargetInstancesScopedList(), + 'c':compute.TargetInstancesScopedList(), + }, + next_page_token='abc', + ), + compute.TargetInstanceAggregatedList( + items={}, + next_page_token='def', + ), + compute.TargetInstanceAggregatedList( + items={ + 'g':compute.TargetInstancesScopedList(), + }, + next_page_token='ghi', + ), + compute.TargetInstanceAggregatedList( + items={ + 'h':compute.TargetInstancesScopedList(), + 'i':compute.TargetInstancesScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetInstanceAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.TargetInstancesScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert 
tuple(type(t) for t in result) == (str, compute.TargetInstancesScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.TargetInstancesScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetInstanceRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "target_instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # 
Establish that the response is the type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetInstanceRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "target_instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "target_instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + target_instance='target_instance_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/targetInstances/{target_instance}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetInstanceRequest(), + project='project_value', + zone='zone_value', + target_instance='target_instance_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetInstanceRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "target_instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetInstance( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + instance='instance_value', + kind='kind_value', + name='name_value', + nat_policy='nat_policy_value', + network='network_value', + self_link='self_link_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetInstance.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TargetInstance) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.instance == 'instance_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.nat_policy == 'nat_policy_value' + assert response.network == 'network_value' + assert response.self_link == 'self_link_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetInstanceRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "target_instance": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetInstance() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetInstance.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "target_instance": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + target_instance='target_instance_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/targetInstances/{target_instance}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetTargetInstanceRequest(), + project='project_value', + zone='zone_value', + target_instance='target_instance_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetInstanceRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["target_instance_resource"] = compute.TargetInstance(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetInstanceRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request_init["target_instance_resource"] = compute.TargetInstance(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + target_instance_resource=compute.TargetInstance(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/targetInstances" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetInstanceRequest(), + project='project_value', + zone='zone_value', + target_instance_resource=compute.TargetInstance(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetInstancesRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetInstanceList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetInstanceList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetInstancesRequest): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetInstanceList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetInstanceList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/targetInstances" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetInstancesRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetInstanceList( + items=[ + compute.TargetInstance(), + compute.TargetInstance(), + compute.TargetInstance(), + ], + next_page_token='abc', + ), + compute.TargetInstanceList( + items=[], + next_page_token='def', + ), + compute.TargetInstanceList( + items=[ + compute.TargetInstance(), + ], + next_page_token='ghi', + ), + compute.TargetInstanceList( + items=[ + compute.TargetInstance(), + compute.TargetInstance(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetInstanceList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetInstance) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.TargetInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetInstancesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetInstancesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.TargetInstancesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetInstancesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetInstancesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_instances_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetInstancesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_instances_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.target_instances.transports.TargetInstancesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetInstancesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_instances_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_instances.transports.TargetInstancesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetInstancesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_instances_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_instances.transports.TargetInstancesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetInstancesTransport() + adc.assert_called_once() + + +def test_target_instances_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetInstancesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_instances_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetInstancesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_instances_host_no_port(): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_instances_host_with_port(): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetInstancesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetInstancesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = TargetInstancesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetInstancesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetInstancesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetInstancesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetInstancesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetInstancesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TargetInstancesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetInstancesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetInstancesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetInstancesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetInstancesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetInstancesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TargetInstancesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetInstancesTransport, '_prep_wrapped_messages') as prep: + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetInstancesTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetInstancesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetInstancesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client 
calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_pools.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_pools.py new file mode 100644 index 000000000..c76b9f924 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_pools.py @@ -0,0 +1,2328 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
#
# NOTE: these imports are shared by every generated test in this module.

# Standard library.
import math
import os

# Third-party test dependencies.
import mock
import pytest

import grpc
from grpc.experimental import aio
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Request, Response
from requests.sessions import Session

# Google API client plumbing.
import google.auth
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account

# Package under test.
from google.cloud.compute_v1.services.target_pools import TargetPoolsClient
from google.cloud.compute_v1.services.target_pools import pagers
from google.cloud.compute_v1.services.target_pools import transports
from google.cloud.compute_v1.types import compute


def client_cert_source_callback():
    # Dummy client-certificate source used by the mTLS tests below.  The
    # tests only assert that this callable is threaded through to the
    # transport, so the byte values themselves are arbitrary.
    return (b"cert bytes", b"key bytes")


# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetPoolsClient._get_default_mtls_endpoint(None) is None + assert TargetPoolsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetPoolsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetPoolsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetPoolsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetPoolsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetPoolsClient, +]) +def test_target_pools_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetPoolsRestTransport, "rest"), +]) +def test_target_pools_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetPoolsClient, +]) +def test_target_pools_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_pools_client_get_transport_class(): + transport = TargetPoolsClient.get_transport_class() + available_transports = [ + transports.TargetPoolsRestTransport, + ] + assert transport in available_transports + + transport = TargetPoolsClient.get_transport_class("rest") + assert transport == transports.TargetPoolsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetPoolsClient, transports.TargetPoolsRestTransport, "rest"), +]) +@mock.patch.object(TargetPoolsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetPoolsClient)) +def test_target_pools_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetPoolsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetPoolsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetPoolsClient, transports.TargetPoolsRestTransport, "rest", "true"), + (TargetPoolsClient, transports.TargetPoolsRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetPoolsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetPoolsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_target_pools_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetPoolsClient, transports.TargetPoolsRestTransport, "rest"), +]) +def test_target_pools_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetPoolsClient, transports.TargetPoolsRestTransport, "rest"), +]) +def test_target_pools_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_add_health_check_rest(transport: str = 'rest', request_type=compute.AddHealthCheckTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_add_health_check_request_resource"] = compute.TargetPoolsAddHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within 
the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_health_check(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_health_check_rest_bad_request(transport: str = 'rest', request_type=compute.AddHealthCheckTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_add_health_check_request_resource"] = compute.TargetPoolsAddHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_health_check(request) + + +def test_add_health_check_rest_from_dict(): + test_add_health_check_rest(request_type=dict) + + +def test_add_health_check_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_add_health_check_request_resource=compute.TargetPoolsAddHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]), + ) + mock_args.update(sample_request) + client.add_health_check(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/addHealthCheck" % client.transport._host, args[1]) + + +def test_add_health_check_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_health_check( + compute.AddHealthCheckTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_add_health_check_request_resource=compute.TargetPoolsAddHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]), + ) + + +def test_add_instance_rest(transport: str = 'rest', request_type=compute.AddInstanceTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_add_instance_request_resource"] = compute.TargetPoolsAddInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.add_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_add_instance_rest_bad_request(transport: str = 'rest', request_type=compute.AddInstanceTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_add_instance_request_resource"] = compute.TargetPoolsAddInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.add_instance(request) + + +def test_add_instance_rest_from_dict(): + test_add_instance_rest(request_type=dict) + + +def test_add_instance_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_add_instance_request_resource=compute.TargetPoolsAddInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + mock_args.update(sample_request) + client.add_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/addInstance" % client.transport._host, args[1]) + + +def test_add_instance_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.add_instance( + compute.AddInstanceTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_add_instance_request_resource=compute.TargetPoolsAddInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListTargetPoolsRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetPoolAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPoolAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListTargetPoolsRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetPoolAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPoolAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/targetPools" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListTargetPoolsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetPoolAggregatedList( + items={ + 'a':compute.TargetPoolsScopedList(), + 'b':compute.TargetPoolsScopedList(), + 'c':compute.TargetPoolsScopedList(), + }, + next_page_token='abc', + ), + compute.TargetPoolAggregatedList( + items={}, + next_page_token='def', + ), + compute.TargetPoolAggregatedList( + items={ + 'g':compute.TargetPoolsScopedList(), + }, + next_page_token='ghi', + ), + compute.TargetPoolAggregatedList( + items={ + 'h':compute.TargetPoolsScopedList(), + 'i':compute.TargetPoolsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetPoolAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.TargetPoolsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.TargetPoolsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.TargetPoolsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetPoolRequest): + client = TargetPoolsClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetPool( + backup_pool='backup_pool_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + failover_ratio=0.1494, + health_checks=['health_checks_value'], + id=205, + instances=['instances_value'], + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + session_affinity='session_affinity_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPool.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TargetPool) + assert response.backup_pool == 'backup_pool_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert math.isclose(response.failover_ratio, 0.1494, rel_tol=1e-6) + assert response.health_checks == ['health_checks_value'] + assert response.id == 205 + assert response.instances == ['instances_value'] + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.session_affinity == 'session_affinity_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetPool() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPool.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + ) + + +def test_get_health_rest(transport: str = 'rest', request_type=compute.GetHealthTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["instance_reference_resource"] = compute.InstanceReference(instance='instance_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetPoolInstanceHealth( + kind='kind_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPoolInstanceHealth.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_health(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TargetPoolInstanceHealth) + assert response.kind == 'kind_value' + + +def test_get_health_rest_bad_request(transport: str = 'rest', request_type=compute.GetHealthTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["instance_reference_resource"] = compute.InstanceReference(instance='instance_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_health(request) + + +def test_get_health_rest_from_dict(): + test_get_health_rest(request_type=dict) + + +def test_get_health_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetPoolInstanceHealth() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPoolInstanceHealth.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + instance_reference_resource=compute.InstanceReference(instance='instance_value'), + ) + mock_args.update(sample_request) + client.get_health(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/getHealth" % client.transport._host, args[1]) + + +def test_get_health_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_health( + compute.GetHealthTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + instance_reference_resource=compute.InstanceReference(instance='instance_value'), + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["target_pool_resource"] = compute.TargetPool(backup_pool='backup_pool_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the 
type that we expect. + assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["target_pool_resource"] = compute.TargetPool(backup_pool='backup_pool_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool_resource=compute.TargetPool(backup_pool='backup_pool_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool_resource=compute.TargetPool(backup_pool='backup_pool_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetPoolsRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetPoolList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPoolList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetPoolsRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetPoolList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetPoolList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetPoolsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetPoolList( + items=[ + compute.TargetPool(), + compute.TargetPool(), + compute.TargetPool(), + ], + next_page_token='abc', + ), + compute.TargetPoolList( + items=[], + next_page_token='def', + ), + compute.TargetPoolList( + items=[ + compute.TargetPool(), + ], + next_page_token='ghi', + ), + compute.TargetPoolList( + items=[ + compute.TargetPool(), + compute.TargetPool(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetPoolList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetPool) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_remove_health_check_rest(transport: str = 'rest', request_type=compute.RemoveHealthCheckTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_remove_health_check_request_resource"] = compute.TargetPoolsRemoveHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the 
method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_health_check(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_health_check_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveHealthCheckTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_remove_health_check_request_resource"] = compute.TargetPoolsRemoveHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_health_check(request) + + +def test_remove_health_check_rest_from_dict(): + test_remove_health_check_rest(request_type=dict) + + +def test_remove_health_check_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_remove_health_check_request_resource=compute.TargetPoolsRemoveHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]), + ) + mock_args.update(sample_request) + client.remove_health_check(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/removeHealthCheck" % client.transport._host, args[1]) + + +def test_remove_health_check_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_health_check( + compute.RemoveHealthCheckTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_remove_health_check_request_resource=compute.TargetPoolsRemoveHealthCheckRequest(health_checks=[compute.HealthCheckReference(health_check='health_check_value')]), + ) + + +def test_remove_instance_rest(transport: str = 'rest', request_type=compute.RemoveInstanceTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_remove_instance_request_resource"] = compute.TargetPoolsRemoveInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.remove_instance(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_remove_instance_rest_bad_request(transport: str = 'rest', request_type=compute.RemoveInstanceTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_pools_remove_instance_request_resource"] = compute.TargetPoolsRemoveInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.remove_instance(request) + + +def test_remove_instance_rest_from_dict(): + test_remove_instance_rest(request_type=dict) + + +def test_remove_instance_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_remove_instance_request_resource=compute.TargetPoolsRemoveInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + mock_args.update(sample_request) + client.remove_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/removeInstance" % client.transport._host, args[1]) + + +def test_remove_instance_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.remove_instance( + compute.RemoveInstanceTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_pools_remove_instance_request_resource=compute.TargetPoolsRemoveInstanceRequest(instances=[compute.InstanceReference(instance='instance_value')]), + ) + + +def test_set_backup_rest(transport: str = 'rest', request_type=compute.SetBackupTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_reference_resource"] = compute.TargetReference(target='target_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_backup(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_backup_rest_bad_request(transport: str = 'rest', request_type=compute.SetBackupTargetPoolRequest): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + request_init["target_reference_resource"] = compute.TargetReference(target='target_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_backup(request) + + +def test_set_backup_rest_from_dict(): + test_set_backup_rest(request_type=dict) + + +def test_set_backup_rest_flattened(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_pool": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_reference_resource=compute.TargetReference(target='target_value'), + ) + mock_args.update(sample_request) + client.set_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetPools/{target_pool}/setBackup" % client.transport._host, args[1]) + + +def test_set_backup_rest_flattened_error(transport: str = 'rest'): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_backup( + compute.SetBackupTargetPoolRequest(), + project='project_value', + region='region_value', + target_pool='target_pool_value', + target_reference_resource=compute.TargetReference(target='target_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetPoolsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TargetPoolsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetPoolsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetPoolsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetPoolsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.TargetPoolsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetPoolsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetPoolsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_pools_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetPoolsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_pools_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.target_pools.transports.TargetPoolsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetPoolsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'add_health_check', + 'add_instance', + 'aggregated_list', + 'delete', + 'get', + 'get_health', + 'insert', + 'list', + 'remove_health_check', + 'remove_instance', + 'set_backup', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_pools_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_pools.transports.TargetPoolsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetPoolsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_pools_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_pools.transports.TargetPoolsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetPoolsTransport() + adc.assert_called_once() + + +def test_target_pools_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetPoolsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_pools_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetPoolsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_pools_host_no_port(): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_pools_host_with_port(): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetPoolsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetPoolsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetPoolsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetPoolsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetPoolsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetPoolsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetPoolsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetPoolsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TargetPoolsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetPoolsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetPoolsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetPoolsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetPoolsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetPoolsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TargetPoolsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetPoolsTransport, '_prep_wrapped_messages') as prep: + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetPoolsTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetPoolsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetPoolsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_ssl_proxies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_ssl_proxies.py new file mode 100644 index 000000000..a3cd355fc --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_ssl_proxies.py @@ -0,0 +1,1853 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.target_ssl_proxies import TargetSslProxiesClient +from google.cloud.compute_v1.services.target_ssl_proxies import pagers +from google.cloud.compute_v1.services.target_ssl_proxies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetSslProxiesClient._get_default_mtls_endpoint(None) is None + assert TargetSslProxiesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetSslProxiesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetSslProxiesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetSslProxiesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetSslProxiesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetSslProxiesClient, +]) +def test_target_ssl_proxies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetSslProxiesRestTransport, "rest"), +]) +def test_target_ssl_proxies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetSslProxiesClient, +]) +def test_target_ssl_proxies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_ssl_proxies_client_get_transport_class(): + transport = TargetSslProxiesClient.get_transport_class() + available_transports = [ + transports.TargetSslProxiesRestTransport, + ] + assert transport in available_transports + + transport = TargetSslProxiesClient.get_transport_class("rest") + assert transport == transports.TargetSslProxiesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetSslProxiesClient, transports.TargetSslProxiesRestTransport, "rest"), +]) +@mock.patch.object(TargetSslProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetSslProxiesClient)) +def test_target_ssl_proxies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetSslProxiesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetSslProxiesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetSslProxiesClient, transports.TargetSslProxiesRestTransport, "rest", "true"), + (TargetSslProxiesClient, transports.TargetSslProxiesRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetSslProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetSslProxiesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_target_ssl_proxies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetSslProxiesClient, transports.TargetSslProxiesRestTransport, "rest"), +]) +def test_target_ssl_proxies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetSslProxiesClient, transports.TargetSslProxiesRestTransport, "rest"), +]) +def test_target_ssl_proxies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_ssl_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetSslProxyRequest(), + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetSslProxy( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + proxy_header='proxy_header_value', + self_link='self_link_value', + service='service_value', + ssl_certificates=['ssl_certificates_value'], + ssl_policy='ssl_policy_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetSslProxy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TargetSslProxy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.proxy_header == 'proxy_header_value' + assert response.self_link == 'self_link_value' + assert response.service == 'service_value' + assert response.ssl_certificates == ['ssl_certificates_value'] + assert response.ssl_policy == 'ssl_policy_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetSslProxy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetSslProxy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_ssl_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetTargetSslProxyRequest(), + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_ssl_proxy_resource"] = compute.TargetSslProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_ssl_proxy_resource"] = compute.TargetSslProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_ssl_proxy_resource=compute.TargetSslProxy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetSslProxyRequest(), + project='project_value', + target_ssl_proxy_resource=compute.TargetSslProxy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetSslProxiesRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetSslProxyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetSslProxyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetSslProxiesRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetSslProxyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetSslProxyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetSslProxiesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetSslProxyList( + items=[ + compute.TargetSslProxy(), + compute.TargetSslProxy(), + compute.TargetSslProxy(), + ], + next_page_token='abc', + ), + compute.TargetSslProxyList( + items=[], + next_page_token='def', + ), + compute.TargetSslProxyList( + items=[ + compute.TargetSslProxy(), + ], + next_page_token='ghi', + ), + compute.TargetSslProxyList( + items=[ + compute.TargetSslProxy(), + compute.TargetSslProxy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetSslProxyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetSslProxy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_backend_service_rest(transport: str = 'rest', request_type=compute.SetBackendServiceTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["target_ssl_proxies_set_backend_service_request_resource"] = compute.TargetSslProxiesSetBackendServiceRequest(service='service_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a 
response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_backend_service(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_backend_service_rest_bad_request(transport: str = 'rest', request_type=compute.SetBackendServiceTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["target_ssl_proxies_set_backend_service_request_resource"] = compute.TargetSslProxiesSetBackendServiceRequest(service='service_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_backend_service(request) + + +def test_set_backend_service_rest_from_dict(): + test_set_backend_service_rest(request_type=dict) + + +def test_set_backend_service_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_ssl_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + target_ssl_proxies_set_backend_service_request_resource=compute.TargetSslProxiesSetBackendServiceRequest(service='service_value'), + ) + mock_args.update(sample_request) + client.set_backend_service(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setBackendService" % client.transport._host, args[1]) + + +def test_set_backend_service_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_backend_service( + compute.SetBackendServiceTargetSslProxyRequest(), + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + target_ssl_proxies_set_backend_service_request_resource=compute.TargetSslProxiesSetBackendServiceRequest(service='service_value'), + ) + + +def test_set_proxy_header_rest(transport: str = 'rest', request_type=compute.SetProxyHeaderTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["target_ssl_proxies_set_proxy_header_request_resource"] = compute.TargetSslProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_proxy_header(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_proxy_header_rest_bad_request(transport: str = 'rest', request_type=compute.SetProxyHeaderTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["target_ssl_proxies_set_proxy_header_request_resource"] = compute.TargetSslProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_proxy_header(request) + + +def test_set_proxy_header_rest_from_dict(): + test_set_proxy_header_rest(request_type=dict) + + +def test_set_proxy_header_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_ssl_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + target_ssl_proxies_set_proxy_header_request_resource=compute.TargetSslProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value'), + ) + mock_args.update(sample_request) + client.set_proxy_header(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setProxyHeader" % client.transport._host, args[1]) + + +def test_set_proxy_header_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_proxy_header( + compute.SetProxyHeaderTargetSslProxyRequest(), + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + target_ssl_proxies_set_proxy_header_request_resource=compute.TargetSslProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value'), + ) + + +def test_set_ssl_certificates_rest(transport: str = 'rest', request_type=compute.SetSslCertificatesTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["target_ssl_proxies_set_ssl_certificates_request_resource"] = compute.TargetSslProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_ssl_certificates(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_ssl_certificates_rest_bad_request(transport: str = 'rest', request_type=compute.SetSslCertificatesTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["target_ssl_proxies_set_ssl_certificates_request_resource"] = compute.TargetSslProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_ssl_certificates(request) + + +def test_set_ssl_certificates_rest_from_dict(): + test_set_ssl_certificates_rest(request_type=dict) + + +def test_set_ssl_certificates_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_ssl_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + target_ssl_proxies_set_ssl_certificates_request_resource=compute.TargetSslProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']), + ) + mock_args.update(sample_request) + client.set_ssl_certificates(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setSslCertificates" % client.transport._host, args[1]) + + +def test_set_ssl_certificates_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_ssl_certificates( + compute.SetSslCertificatesTargetSslProxyRequest(), + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + target_ssl_proxies_set_ssl_certificates_request_resource=compute.TargetSslProxiesSetSslCertificatesRequest(ssl_certificates=['ssl_certificates_value']), + ) + + +def test_set_ssl_policy_rest(transport: str = 'rest', request_type=compute.SetSslPolicyTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["ssl_policy_reference_resource"] = compute.SslPolicyReference(ssl_policy='ssl_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_ssl_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_ssl_policy_rest_bad_request(transport: str = 'rest', request_type=compute.SetSslPolicyTargetSslProxyRequest): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_ssl_proxy": "sample2"} + request_init["ssl_policy_reference_resource"] = compute.SslPolicyReference(ssl_policy='ssl_policy_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_ssl_policy(request) + + +def test_set_ssl_policy_rest_from_dict(): + test_set_ssl_policy_rest(request_type=dict) + + +def test_set_ssl_policy_rest_flattened(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_ssl_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + ssl_policy_reference_resource=compute.SslPolicyReference(ssl_policy='ssl_policy_value'), + ) + mock_args.update(sample_request) + client.set_ssl_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetSslProxies/{target_ssl_proxy}/setSslPolicy" % client.transport._host, args[1]) + + +def test_set_ssl_policy_rest_flattened_error(transport: str = 'rest'): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_ssl_policy( + compute.SetSslPolicyTargetSslProxyRequest(), + project='project_value', + target_ssl_proxy='target_ssl_proxy_value', + ssl_policy_reference_resource=compute.SslPolicyReference(ssl_policy='ssl_policy_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetSslProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TargetSslProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetSslProxiesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetSslProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetSslProxiesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.TargetSslProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetSslProxiesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetSslProxiesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_ssl_proxies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetSslProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_ssl_proxies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.target_ssl_proxies.transports.TargetSslProxiesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetSslProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'set_backend_service', + 'set_proxy_header', + 'set_ssl_certificates', + 'set_ssl_policy', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_ssl_proxies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_ssl_proxies.transports.TargetSslProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetSslProxiesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_ssl_proxies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_ssl_proxies.transports.TargetSslProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetSslProxiesTransport() + adc.assert_called_once() + + +def test_target_ssl_proxies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetSslProxiesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_ssl_proxies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetSslProxiesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_ssl_proxies_host_no_port(): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_ssl_proxies_host_with_port(): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetSslProxiesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetSslProxiesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetSslProxiesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetSslProxiesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetSslProxiesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetSslProxiesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetSslProxiesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetSslProxiesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TargetSslProxiesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetSslProxiesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetSslProxiesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetSslProxiesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetSslProxiesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetSslProxiesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TargetSslProxiesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetSslProxiesTransport, '_prep_wrapped_messages') as prep: + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetSslProxiesTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetSslProxiesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetSslProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # 
Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_tcp_proxies.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_tcp_proxies.py new file mode 100644 index 000000000..4d59d8053 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_tcp_proxies.py @@ -0,0 +1,1543 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.target_tcp_proxies import TargetTcpProxiesClient +from google.cloud.compute_v1.services.target_tcp_proxies import pagers +from google.cloud.compute_v1.services.target_tcp_proxies import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetTcpProxiesClient._get_default_mtls_endpoint(None) is None + assert TargetTcpProxiesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetTcpProxiesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetTcpProxiesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetTcpProxiesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetTcpProxiesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetTcpProxiesClient, +]) +def test_target_tcp_proxies_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetTcpProxiesRestTransport, "rest"), +]) +def test_target_tcp_proxies_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetTcpProxiesClient, +]) +def test_target_tcp_proxies_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_tcp_proxies_client_get_transport_class(): + transport = TargetTcpProxiesClient.get_transport_class() + available_transports = [ + transports.TargetTcpProxiesRestTransport, + ] + assert transport in available_transports + + transport = TargetTcpProxiesClient.get_transport_class("rest") + assert transport == transports.TargetTcpProxiesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetTcpProxiesClient, transports.TargetTcpProxiesRestTransport, "rest"), +]) +@mock.patch.object(TargetTcpProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetTcpProxiesClient)) +def test_target_tcp_proxies_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetTcpProxiesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetTcpProxiesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetTcpProxiesClient, transports.TargetTcpProxiesRestTransport, "rest", "true"), + (TargetTcpProxiesClient, transports.TargetTcpProxiesRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetTcpProxiesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetTcpProxiesClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_target_tcp_proxies_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetTcpProxiesClient, transports.TargetTcpProxiesRestTransport, "rest"), +]) +def test_target_tcp_proxies_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetTcpProxiesClient, transports.TargetTcpProxiesRestTransport, "rest"), +]) +def test_target_tcp_proxies_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_tcp_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetTcpProxyRequest(), + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetTcpProxy( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + proxy_bind=True, + proxy_header='proxy_header_value', + self_link='self_link_value', + service='service_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetTcpProxy.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.TargetTcpProxy) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.proxy_bind is True + assert response.proxy_header == 'proxy_header_value' + assert response.self_link == 'self_link_value' + assert response.service == 'service_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetTcpProxy() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetTcpProxy.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_tcp_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetTargetTcpProxyRequest(), + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_tcp_proxy_resource"] = compute.TargetTcpProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request_init["target_tcp_proxy_resource"] = compute.TargetTcpProxy(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_tcp_proxy_resource=compute.TargetTcpProxy(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetTcpProxies" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetTcpProxyRequest(), + project='project_value', + target_tcp_proxy_resource=compute.TargetTcpProxy(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetTcpProxiesRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetTcpProxyList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetTcpProxyList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetTcpProxiesRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetTcpProxyList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetTcpProxyList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetTcpProxies" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetTcpProxiesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetTcpProxyList( + items=[ + compute.TargetTcpProxy(), + compute.TargetTcpProxy(), + compute.TargetTcpProxy(), + ], + next_page_token='abc', + ), + compute.TargetTcpProxyList( + items=[], + next_page_token='def', + ), + compute.TargetTcpProxyList( + items=[ + compute.TargetTcpProxy(), + ], + next_page_token='ghi', + ), + compute.TargetTcpProxyList( + items=[ + compute.TargetTcpProxy(), + compute.TargetTcpProxy(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetTcpProxyList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetTcpProxy) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_backend_service_rest(transport: str = 'rest', request_type=compute.SetBackendServiceTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request_init["target_tcp_proxies_set_backend_service_request_resource"] = compute.TargetTcpProxiesSetBackendServiceRequest(service='service_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a 
response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_backend_service(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_backend_service_rest_bad_request(transport: str = 'rest', request_type=compute.SetBackendServiceTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request_init["target_tcp_proxies_set_backend_service_request_resource"] = compute.TargetTcpProxiesSetBackendServiceRequest(service='service_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_backend_service(request) + + +def test_set_backend_service_rest_from_dict(): + test_set_backend_service_rest(request_type=dict) + + +def test_set_backend_service_rest_flattened(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_tcp_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + target_tcp_proxies_set_backend_service_request_resource=compute.TargetTcpProxiesSetBackendServiceRequest(service='service_value'), + ) + mock_args.update(sample_request) + client.set_backend_service(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}/setBackendService" % client.transport._host, args[1]) + + +def test_set_backend_service_rest_flattened_error(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_backend_service( + compute.SetBackendServiceTargetTcpProxyRequest(), + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + target_tcp_proxies_set_backend_service_request_resource=compute.TargetTcpProxiesSetBackendServiceRequest(service='service_value'), + ) + + +def test_set_proxy_header_rest(transport: str = 'rest', request_type=compute.SetProxyHeaderTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request_init["target_tcp_proxies_set_proxy_header_request_resource"] = compute.TargetTcpProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_proxy_header(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_proxy_header_rest_bad_request(transport: str = 'rest', request_type=compute.SetProxyHeaderTargetTcpProxyRequest): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "target_tcp_proxy": "sample2"} + request_init["target_tcp_proxies_set_proxy_header_request_resource"] = compute.TargetTcpProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_proxy_header(request) + + +def test_set_proxy_header_rest_from_dict(): + test_set_proxy_header_rest(request_type=dict) + + +def test_set_proxy_header_rest_flattened(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "target_tcp_proxy": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + target_tcp_proxies_set_proxy_header_request_resource=compute.TargetTcpProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value'), + ) + mock_args.update(sample_request) + client.set_proxy_header(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/targetTcpProxies/{target_tcp_proxy}/setProxyHeader" % client.transport._host, args[1]) + + +def test_set_proxy_header_rest_flattened_error(transport: str = 'rest'): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_proxy_header( + compute.SetProxyHeaderTargetTcpProxyRequest(), + project='project_value', + target_tcp_proxy='target_tcp_proxy_value', + target_tcp_proxies_set_proxy_header_request_resource=compute.TargetTcpProxiesSetProxyHeaderRequest(proxy_header='proxy_header_value'), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetTcpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.TargetTcpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetTcpProxiesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetTcpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetTcpProxiesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.TargetTcpProxiesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetTcpProxiesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetTcpProxiesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_tcp_proxies_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetTcpProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_tcp_proxies_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.target_tcp_proxies.transports.TargetTcpProxiesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetTcpProxiesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'delete', + 'get', + 'insert', + 'list', + 'set_backend_service', + 'set_proxy_header', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_tcp_proxies_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_tcp_proxies.transports.TargetTcpProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetTcpProxiesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_tcp_proxies_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_tcp_proxies.transports.TargetTcpProxiesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetTcpProxiesTransport() + adc.assert_called_once() + + +def test_target_tcp_proxies_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetTcpProxiesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_tcp_proxies_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetTcpProxiesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_tcp_proxies_host_no_port(): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_tcp_proxies_host_with_port(): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetTcpProxiesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetTcpProxiesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetTcpProxiesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetTcpProxiesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetTcpProxiesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetTcpProxiesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetTcpProxiesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetTcpProxiesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = TargetTcpProxiesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetTcpProxiesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetTcpProxiesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetTcpProxiesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetTcpProxiesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetTcpProxiesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = TargetTcpProxiesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetTcpProxiesTransport, '_prep_wrapped_messages') as prep: + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetTcpProxiesTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetTcpProxiesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetTcpProxiesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # 
Test client calls underlying transport. + with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_vpn_gateways.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_vpn_gateways.py new file mode 100644 index 000000000..3a4c60d41 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_target_vpn_gateways.py @@ -0,0 +1,1432 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# 
+import os
+import mock
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+from requests import Response
+from requests import Request
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.compute_v1.services.target_vpn_gateways import TargetVpnGatewaysClient
+from google.cloud.compute_v1.services.target_vpn_gateways import pagers
+from google.cloud.compute_v1.services.target_vpn_gateways import transports
+from google.cloud.compute_v1.types import compute
+from google.oauth2 import service_account
+import google.auth
+
+
+def client_cert_source_callback():
+    """Return a fixed ``(cert_bytes, key_bytes)`` pair of dummy values.
+
+    Used in the mTLS tests below as the ``client_cert_source`` callback
+    (see the ``client_cert_source_for_mtls`` assertions); the values are
+    never parsed as real credentials, only compared for identity/equality.
+    """
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert TargetVpnGatewaysClient._get_default_mtls_endpoint(None) is None + assert TargetVpnGatewaysClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert TargetVpnGatewaysClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert TargetVpnGatewaysClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert TargetVpnGatewaysClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert TargetVpnGatewaysClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + TargetVpnGatewaysClient, +]) +def test_target_vpn_gateways_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.TargetVpnGatewaysRestTransport, "rest"), +]) +def test_target_vpn_gateways_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = 
transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + TargetVpnGatewaysClient, +]) +def test_target_vpn_gateways_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_vpn_gateways_client_get_transport_class(): + transport = TargetVpnGatewaysClient.get_transport_class() + available_transports = [ + transports.TargetVpnGatewaysRestTransport, + ] + assert transport in available_transports + + transport = TargetVpnGatewaysClient.get_transport_class("rest") + assert transport == transports.TargetVpnGatewaysRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetVpnGatewaysClient, transports.TargetVpnGatewaysRestTransport, "rest"), +]) +@mock.patch.object(TargetVpnGatewaysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetVpnGatewaysClient)) +def test_target_vpn_gateways_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(TargetVpnGatewaysClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(TargetVpnGatewaysClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (TargetVpnGatewaysClient, transports.TargetVpnGatewaysRestTransport, "rest", "true"), + (TargetVpnGatewaysClient, transports.TargetVpnGatewaysRestTransport, "rest", "false"), +]) +@mock.patch.object(TargetVpnGatewaysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(TargetVpnGatewaysClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_target_vpn_gateways_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetVpnGatewaysClient, transports.TargetVpnGatewaysRestTransport, "rest"), +]) +def test_target_vpn_gateways_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (TargetVpnGatewaysClient, transports.TargetVpnGatewaysRestTransport, "rest"), +]) +def test_target_vpn_gateways_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListTargetVpnGatewaysRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetVpnGatewayAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetVpnGatewayAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListTargetVpnGatewaysRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetVpnGatewayAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetVpnGatewayAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/targetVpnGateways" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListTargetVpnGatewaysRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetVpnGatewayAggregatedList( + items={ + 'a':compute.TargetVpnGatewaysScopedList(), + 'b':compute.TargetVpnGatewaysScopedList(), + 'c':compute.TargetVpnGatewaysScopedList(), + }, + next_page_token='abc', + ), + compute.TargetVpnGatewayAggregatedList( + items={}, + next_page_token='def', + ), + compute.TargetVpnGatewayAggregatedList( + items={ + 'g':compute.TargetVpnGatewaysScopedList(), + }, + next_page_token='ghi', + ), + compute.TargetVpnGatewayAggregatedList( + items={ + 'h':compute.TargetVpnGatewaysScopedList(), + 'i':compute.TargetVpnGatewaysScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetVpnGatewayAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in 
zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.TargetVpnGatewaysScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.TargetVpnGatewaysScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.TargetVpnGatewaysScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteTargetVpnGatewayRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteTargetVpnGatewayRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_vpn_gateway": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_vpn_gateway='target_vpn_gateway_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{target_vpn_gateway}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteTargetVpnGatewayRequest(), + project='project_value', + region='region_value', + target_vpn_gateway='target_vpn_gateway_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetTargetVpnGatewayRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetVpnGateway( + creation_timestamp='creation_timestamp_value', + description='description_value', + forwarding_rules=['forwarding_rules_value'], + id=205, + kind='kind_value', + name='name_value', + network='network_value', + region='region_value', + self_link='self_link_value', + status='status_value', + tunnels=['tunnels_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetVpnGateway.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TargetVpnGateway) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.forwarding_rules == ['forwarding_rules_value'] + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.tunnels == ['tunnels_value'] + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetTargetVpnGatewayRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "target_vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetVpnGateway() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetVpnGateway.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "target_vpn_gateway": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_vpn_gateway='target_vpn_gateway_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetVpnGateways/{target_vpn_gateway}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetTargetVpnGatewayRequest(), + project='project_value', + region='region_value', + target_vpn_gateway='target_vpn_gateway_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertTargetVpnGatewayRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["target_vpn_gateway_resource"] = compute.TargetVpnGateway(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertTargetVpnGatewayRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["target_vpn_gateway_resource"] = compute.TargetVpnGateway(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + target_vpn_gateway_resource=compute.TargetVpnGateway(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetVpnGateways" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertTargetVpnGatewayRequest(), + project='project_value', + region='region_value', + target_vpn_gateway_resource=compute.TargetVpnGateway(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListTargetVpnGatewaysRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TargetVpnGatewayList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetVpnGatewayList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListTargetVpnGatewaysRequest): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TargetVpnGatewayList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TargetVpnGatewayList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/targetVpnGateways" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListTargetVpnGatewaysRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.TargetVpnGatewayList( + items=[ + compute.TargetVpnGateway(), + compute.TargetVpnGateway(), + compute.TargetVpnGateway(), + ], + next_page_token='abc', + ), + compute.TargetVpnGatewayList( + items=[], + next_page_token='def', + ), + compute.TargetVpnGatewayList( + items=[ + compute.TargetVpnGateway(), + ], + next_page_token='ghi', + ), + compute.TargetVpnGatewayList( + items=[ + compute.TargetVpnGateway(), + compute.TargetVpnGateway(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.TargetVpnGatewayList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.TargetVpnGateway) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.TargetVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.TargetVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetVpnGatewaysClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.TargetVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = TargetVpnGatewaysClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.TargetVpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = TargetVpnGatewaysClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.TargetVpnGatewaysRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_target_vpn_gateways_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.TargetVpnGatewaysTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_target_vpn_gateways_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.target_vpn_gateways.transports.TargetVpnGatewaysTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.TargetVpnGatewaysTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_target_vpn_gateways_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.target_vpn_gateways.transports.TargetVpnGatewaysTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetVpnGatewaysTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_target_vpn_gateways_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.target_vpn_gateways.transports.TargetVpnGatewaysTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.TargetVpnGatewaysTransport() + adc.assert_called_once() + + +def test_target_vpn_gateways_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + TargetVpnGatewaysClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_target_vpn_gateways_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.TargetVpnGatewaysRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_target_vpn_gateways_host_no_port(): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_target_vpn_gateways_host_with_port(): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" 
+ expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = TargetVpnGatewaysClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = TargetVpnGatewaysClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = TargetVpnGatewaysClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = TargetVpnGatewaysClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = TargetVpnGatewaysClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = TargetVpnGatewaysClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = TargetVpnGatewaysClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = TargetVpnGatewaysClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetVpnGatewaysClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = TargetVpnGatewaysClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = TargetVpnGatewaysClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = TargetVpnGatewaysClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = TargetVpnGatewaysClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = TargetVpnGatewaysClient.common_location_path(**expected) + + # Check that the path construction is reversible. 
+ actual = TargetVpnGatewaysClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.TargetVpnGatewaysTransport, '_prep_wrapped_messages') as prep: + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.TargetVpnGatewaysTransport, '_prep_wrapped_messages') as prep: + transport_class = TargetVpnGatewaysClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = TargetVpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_url_maps.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_url_maps.py new file mode 100644 index 000000000..07713403d --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_url_maps.py @@ -0,0 +1,1992 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.url_maps import UrlMapsClient +from google.cloud.compute_v1.services.url_maps import pagers +from google.cloud.compute_v1.services.url_maps import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert UrlMapsClient._get_default_mtls_endpoint(None) is None + assert UrlMapsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert UrlMapsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert UrlMapsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert UrlMapsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert UrlMapsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + UrlMapsClient, +]) +def test_url_maps_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.UrlMapsRestTransport, "rest"), +]) +def test_url_maps_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + UrlMapsClient, +]) +def test_url_maps_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_url_maps_client_get_transport_class(): + transport = UrlMapsClient.get_transport_class() + available_transports = [ + transports.UrlMapsRestTransport, + ] + assert transport in available_transports + + transport = UrlMapsClient.get_transport_class("rest") + assert transport == transports.UrlMapsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (UrlMapsClient, transports.UrlMapsRestTransport, "rest"), +]) +@mock.patch.object(UrlMapsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(UrlMapsClient)) +def test_url_maps_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(UrlMapsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(UrlMapsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (UrlMapsClient, transports.UrlMapsRestTransport, "rest", "true"), + (UrlMapsClient, transports.UrlMapsRestTransport, "rest", "false"), +]) +@mock.patch.object(UrlMapsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(UrlMapsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_url_maps_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (UrlMapsClient, transports.UrlMapsRestTransport, "rest"), +]) +def test_url_maps_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (UrlMapsClient, transports.UrlMapsRestTransport, "rest"), +]) +def test_url_maps_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListUrlMapsRequest): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.UrlMapsAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMapsAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListUrlMapsRequest): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.UrlMapsAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.UrlMapsAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/urlMaps" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListUrlMapsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.UrlMapsAggregatedList( + items={ + 'a':compute.UrlMapsScopedList(), + 'b':compute.UrlMapsScopedList(), + 'c':compute.UrlMapsScopedList(), + }, + next_page_token='abc', + ), + compute.UrlMapsAggregatedList( + items={}, + next_page_token='def', + ), + compute.UrlMapsAggregatedList( + items={ + 'g':compute.UrlMapsScopedList(), + }, + next_page_token='ghi', + ), + compute.UrlMapsAggregatedList( + items={ + 'h':compute.UrlMapsScopedList(), + 'i':compute.UrlMapsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.UrlMapsAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.UrlMapsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.UrlMapsScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.UrlMapsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteUrlMapRequest): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that 
will satisfy transcoding + request_init = {"project": "sample1", "url_map": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteUrlMapRequest): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "url_map": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "url_map": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + url_map='url_map_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/global/urlMaps/{url_map}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = UrlMapsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
    with pytest.raises(ValueError):
        client.delete(
            compute.DeleteUrlMapRequest(),
            project='project_value',
            url_map='url_map_value',
        )


# Happy-path get(): a mocked 200 response carrying a JSON UrlMap must be
# deserialized into a compute.UrlMap with all fields intact.
def test_get_rest(transport: str = 'rest', request_type=compute.GetUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.UrlMap(
            creation_timestamp='creation_timestamp_value',
            default_service='default_service_value',
            description='description_value',
            fingerprint='fingerprint_value',
            id=205,
            kind='kind_value',
            name='name_value',
            region='region_value',
            self_link='self_link_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.UrlMap.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.get(request)

    # Establish that the response is the type that we expect.
    assert isinstance(response, compute.UrlMap)
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.default_service == 'default_service_value'
    assert response.description == 'description_value'
    assert response.fingerprint == 'fingerprint_value'
    assert response.id == 205
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'


# A mocked HTTP 400 from the session must surface to the caller as
# core_exceptions.BadRequest.
def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.get(request)


# Re-run test_get_rest with a plain dict as the request to exercise the
# dict -> request-message coercion path.
def test_get_rest_from_dict():
    test_get_rest(request_type=dict)


# Calling with flattened keyword arguments must hit the expected REST URL.
def test_get_rest_flattened(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.UrlMap()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.UrlMap.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "url_map": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            url_map='url_map_value',
        )
        mock_args.update(sample_request)
        client.get(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/urlMaps/{url_map}" % client.transport._host, args[1])


# Supplying both a request object and flattened fields is mutually
# exclusive and must raise ValueError.
def test_get_rest_flattened_error(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get(
            compute.GetUrlMapRequest(),
            project='project_value',
            url_map='url_map_value',
        )


# Happy-path insert(): a mocked 200 response carrying a JSON Operation must
# be deserialized into a compute.Operation with all fields intact.
def test_insert_rest(transport: str = 'rest', request_type=compute.InsertUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.insert(request)

    # Establish that the response is the type that we expect.
    # Every Operation field set above must round-trip through JSON unchanged.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


# A mocked HTTP 400 from the session must surface to the caller as
# core_exceptions.BadRequest.
def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.insert(request)


# Re-run test_insert_rest with a plain dict as the request to exercise the
# dict -> request-message coercion path.
def test_insert_rest_from_dict():
    test_insert_rest(request_type=dict)


# Calling with flattened keyword arguments must hit the expected REST URL.
def test_insert_rest_flattened(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'),
        )
        mock_args.update(sample_request)
        client.insert(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/urlMaps" % client.transport._host, args[1])


# Supplying both a request object and flattened fields is mutually
# exclusive and must raise ValueError.
def test_insert_rest_flattened_error(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.insert(
            compute.InsertUrlMapRequest(),
            project='project_value',
            url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'),
        )


# Happy-path invalidate_cache(): a mocked 200 response carrying a JSON
# Operation must be deserialized into a compute.Operation with all fields
# intact.
def test_invalidate_cache_rest(transport: str = 'rest', request_type=compute.InvalidateCacheUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request_init["cache_invalidation_rule_resource"] = compute.CacheInvalidationRule(host='host_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.invalidate_cache(request)

    # Establish that the response is the type that we expect.
    # Every Operation field set above must round-trip through JSON unchanged.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


# A mocked HTTP 400 from the session must surface to the caller as
# core_exceptions.BadRequest.
def test_invalidate_cache_rest_bad_request(transport: str = 'rest', request_type=compute.InvalidateCacheUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request_init["cache_invalidation_rule_resource"] = compute.CacheInvalidationRule(host='host_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.invalidate_cache(request)


# Re-run test_invalidate_cache_rest with a plain dict as the request to
# exercise the dict -> request-message coercion path.
def test_invalidate_cache_rest_from_dict():
    test_invalidate_cache_rest(request_type=dict)


# Calling with flattened keyword arguments must hit the expected REST URL.
def test_invalidate_cache_rest_flattened(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "url_map": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            url_map='url_map_value',
            cache_invalidation_rule_resource=compute.CacheInvalidationRule(host='host_value'),
        )
        mock_args.update(sample_request)
        client.invalidate_cache(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/urlMaps/{url_map}/invalidateCache" % client.transport._host, args[1])


# Supplying both a request object and flattened fields is mutually
# exclusive and must raise ValueError.
def test_invalidate_cache_rest_flattened_error(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.invalidate_cache(
            compute.InvalidateCacheUrlMapRequest(),
            project='project_value',
            url_map='url_map_value',
            cache_invalidation_rule_resource=compute.CacheInvalidationRule(host='host_value'),
        )


# Happy-path list(): a mocked 200 response carrying a JSON UrlMapList must
# come back wrapped in a ListPager that exposes the list fields.
def test_list_rest(transport: str = 'rest', request_type=compute.ListUrlMapsRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.UrlMapList(
            id='id_value',
            kind='kind_value',
            next_page_token='next_page_token_value',
            self_link='self_link_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.UrlMapList.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.list(request)

    # Establish that the response is the type that we expect.
    # The pager proxies the underlying UrlMapList's scalar fields.
    assert isinstance(response, pagers.ListPager)
    assert response.id == 'id_value'
    assert response.kind == 'kind_value'
    assert response.next_page_token == 'next_page_token_value'
    assert response.self_link == 'self_link_value'


# A mocked HTTP 400 from the session must surface to the caller as
# core_exceptions.BadRequest.
def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListUrlMapsRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1"}
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.list(request)


# Re-run test_list_rest with a plain dict as the request to exercise the
# dict -> request-message coercion path.
def test_list_rest_from_dict():
    test_list_rest(request_type=dict)


# Calling with flattened keyword arguments must hit the expected REST URL.
def test_list_rest_flattened(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.UrlMapList()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.UrlMapList.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
        )
        mock_args.update(sample_request)
        client.list(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/urlMaps" % client.transport._host, args[1])


# Supplying both a request object and flattened fields is mutually
# exclusive and must raise ValueError.
def test_list_rest_flattened_error(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.list(
            compute.ListUrlMapsRequest(),
            project='project_value',
        )


# Pagination: four pages (3 + 0 + 1 + 2 items) are served twice; iterating
# the pager must yield all 6 items, and the per-page tokens must match.
def test_list_rest_pager():
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # TODO(kbandes): remove this mock unless there's a good reason for it.
        #with mock.patch.object(path_template, 'transcode') as transcode:
        # Set the response as a series of pages
        response = (
            compute.UrlMapList(
                items=[
                    compute.UrlMap(),
                    compute.UrlMap(),
                    compute.UrlMap(),
                ],
                next_page_token='abc',
            ),
            compute.UrlMapList(
                items=[],
                next_page_token='def',
            ),
            compute.UrlMapList(
                items=[
                    compute.UrlMap(),
                ],
                next_page_token='ghi',
            ),
            compute.UrlMapList(
                items=[
                    compute.UrlMap(),
                    compute.UrlMap(),
                ],
            ),
        )
        # Two responses for two calls
        response = response + response

        # Wrap the values into proper Response objs
        response = tuple(compute.UrlMapList.to_json(x) for x in response)
        return_values = tuple(Response() for i in response)
        for return_val, response_val in zip(return_values, response):
            return_val._content = response_val.encode('UTF-8')
            return_val.status_code = 200
        req.side_effect = return_values

        sample_request = {"project": "sample1"}

        pager = client.list(request=sample_request)

        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, compute.UrlMap)
                   for i in results)

        pages = list(client.list(request=sample_request).pages)
        for page_, token in zip(pages, ['abc','def','ghi', '']):
            assert page_.raw_page.next_page_token == token


# Happy-path patch(): a mocked 200 response carrying a JSON Operation must
# be deserialized into a compute.Operation with all fields intact.
def test_patch_rest(transport: str = 'rest', request_type=compute.PatchUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.patch(request)

    # Establish that the response is the type that we expect.
    # Every Operation field set above must round-trip through JSON unchanged.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


# A mocked HTTP 400 from the session must surface to the caller as
# core_exceptions.BadRequest.
def test_patch_rest_bad_request(transport: str = 'rest', request_type=compute.PatchUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.patch(request)


# Re-run test_patch_rest with a plain dict as the request to exercise the
# dict -> request-message coercion path.
def test_patch_rest_from_dict():
    test_patch_rest(request_type=dict)


# Calling with flattened keyword arguments must hit the expected REST URL.
def test_patch_rest_flattened(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "url_map": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            url_map='url_map_value',
            url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'),
        )
        mock_args.update(sample_request)
        client.patch(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/urlMaps/{url_map}" % client.transport._host, args[1])


# Supplying both a request object and flattened fields is mutually
# exclusive and must raise ValueError.
def test_patch_rest_flattened_error(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.patch(
            compute.PatchUrlMapRequest(),
            project='project_value',
            url_map='url_map_value',
            url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'),
        )


# Happy-path update(): a mocked 200 response carrying a JSON Operation must
# be deserialized into a compute.Operation with all fields intact.
def test_update_rest(transport: str = 'rest', request_type=compute.UpdateUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation(
            client_operation_id='client_operation_id_value',
            creation_timestamp='creation_timestamp_value',
            description='description_value',
            end_time='end_time_value',
            http_error_message='http_error_message_value',
            http_error_status_code=2374,
            id=205,
            insert_time='insert_time_value',
            kind='kind_value',
            name='name_value',
            operation_group_id='operation_group_id_value',
            operation_type='operation_type_value',
            progress=885,
            region='region_value',
            self_link='self_link_value',
            start_time='start_time_value',
            status=compute.Operation.Status.DONE,
            status_message='status_message_value',
            target_id=947,
            target_link='target_link_value',
            user='user_value',
            zone='zone_value',
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.update(request)

    # Establish that the response is the type that we expect.
    # Every Operation field set above must round-trip through JSON unchanged.
    assert isinstance(response, compute.Operation)
    assert response.client_operation_id == 'client_operation_id_value'
    assert response.creation_timestamp == 'creation_timestamp_value'
    assert response.description == 'description_value'
    assert response.end_time == 'end_time_value'
    assert response.http_error_message == 'http_error_message_value'
    assert response.http_error_status_code == 2374
    assert response.id == 205
    assert response.insert_time == 'insert_time_value'
    assert response.kind == 'kind_value'
    assert response.name == 'name_value'
    assert response.operation_group_id == 'operation_group_id_value'
    assert response.operation_type == 'operation_type_value'
    assert response.progress == 885
    assert response.region == 'region_value'
    assert response.self_link == 'self_link_value'
    assert response.start_time == 'start_time_value'
    assert response.status == compute.Operation.Status.DONE
    assert response.status_message == 'status_message_value'
    assert response.target_id == 947
    assert response.target_link == 'target_link_value'
    assert response.user == 'user_value'
    assert response.zone == 'zone_value'


# A mocked HTTP 400 from the session must surface to the caller as
# core_exceptions.BadRequest.
def test_update_rest_bad_request(transport: str = 'rest', request_type=compute.UpdateUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request_init["url_map_resource"] = compute.UrlMap(creation_timestamp='creation_timestamp_value')
    request = request_type(request_init)

    # Mock the http request call within the method and fake a BadRequest error.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 400
        response_value.request = Request()
        req.return_value = response_value
        client.update(request)


# Re-run test_update_rest with a plain dict as the request to exercise the
# dict -> request-message coercion path.
def test_update_rest_from_dict():
    test_update_rest(request_type=dict)


# Calling with flattened keyword arguments must hit the expected REST URL.
def test_update_rest_flattened(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.Operation()

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.Operation.to_json(return_value)

        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value

        # get arguments that satisfy an http rule for this method
        sample_request = {"project": "sample1", "url_map": "sample2"}

        # get truthy value for each flattened field
        mock_args = dict(
            project='project_value',
            url_map='url_map_value',
            url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'),
        )
        mock_args.update(sample_request)
        client.update(**mock_args)

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate("https://%s/compute/v1/projects/{project}/global/urlMaps/{url_map}" % client.transport._host, args[1])


# Supplying both a request object and flattened fields is mutually
# exclusive and must raise ValueError.
def test_update_rest_flattened_error(transport: str = 'rest'):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update(
            compute.UpdateUrlMapRequest(),
            project='project_value',
            url_map='url_map_value',
            url_map_resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'),
        )


# Happy-path validate(): a mocked 200 response carrying a JSON
# UrlMapsValidateResponse must be deserialized into that type.
def test_validate_rest(transport: str = 'rest', request_type=compute.ValidateUrlMapRequest):
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # send a request that will satisfy transcoding
    request_init = {"project": "sample1", "url_map": "sample2"}
    request_init["url_maps_validate_request_resource"] = compute.UrlMapsValidateRequest(resource=compute.UrlMap(creation_timestamp='creation_timestamp_value'))
    request = request_type(request_init)

    # Mock the http request call within the method and fake a response.
    with mock.patch.object(Session, 'request') as req:
        # Designate an appropriate value for the returned response.
        return_value = compute.UrlMapsValidateResponse(
        )

        # Wrap the value into a proper Response obj
        response_value = Response()
        response_value.status_code = 200
        json_return_value = compute.UrlMapsValidateResponse.to_json(return_value)
        response_value._content = json_return_value.encode('UTF-8')
        req.return_value = response_value
        response = client.validate(request)

    # Establish that the response is the type that we expect.
def test_validate_rest_bad_request(transport: str = 'rest', request_type=compute.ValidateUrlMapRequest):
    """An HTTP 400 from the transport surfaces as core_exceptions.BadRequest."""
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Build a request that satisfies the URL transcoding rules for validate.
    request = request_type({
        "project": "sample1",
        "url_map": "sample2",
        "url_maps_validate_request_resource": compute.UrlMapsValidateRequest(
            resource=compute.UrlMap(creation_timestamp='creation_timestamp_value')
        ),
    })

    # Fake a 400 response from the underlying HTTP session and confirm the
    # client translates it into the expected exception type.
    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
        bad_response = Response()
        bad_response.status_code = 400
        bad_response.request = Request()
        req.return_value = bad_response
        client.validate(request)
def test_validate_rest_flattened_error(transport: str = 'rest'):
    """Mixing a request object with flattened fields must raise ValueError."""
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    request = compute.ValidateUrlMapRequest()
    flattened_fields = dict(
        project='project_value',
        url_map='url_map_value',
        url_maps_validate_request_resource=compute.UrlMapsValidateRequest(
            resource=compute.UrlMap(creation_timestamp='creation_timestamp_value')
        ),
    )

    # The client cannot tell which source of values wins, so it rejects the
    # combination before any HTTP traffic happens.
    with pytest.raises(ValueError):
        client.validate(request, **flattened_fields)
def test_transport_instance():
    """A pre-built transport instance is adopted by the client unchanged."""
    custom_transport = transports.UrlMapsRestTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # The client must hold the exact transport object it was handed, not a copy.
    client = UrlMapsClient(transport=custom_transport)
    assert client.transport is custom_transport
def test_url_maps_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Stub out __init__ so the abstract transport can be instantiated without
    # real credential plumbing.
    with mock.patch('google.cloud.compute_v1.services.url_maps.transports.UrlMapsTransport.__init__') as Transport:
        Transport.return_value = None
        transport = transports.UrlMapsTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )

        # The base class declares the full surface but implements none of it.
        rpc_names = (
            'aggregated_list',
            'delete',
            'get',
            'insert',
            'invalidate_cache',
            'list',
            'patch',
            'update',
            'validate',
        )
        for rpc_name in rpc_names:
            with pytest.raises(NotImplementedError):
                getattr(transport, rpc_name)(request=object())

        # close() is likewise abstract on the base transport.
        with pytest.raises(NotImplementedError):
            transport.close()
def test_url_maps_host_no_port():
    """An api_endpoint without a port gets the default :443 on the transport host."""
    options = client_options.ClientOptions(api_endpoint='compute.googleapis.com')
    client = UrlMapsClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
    )
    assert client.transport._host == 'compute.googleapis.com:443'
def test_common_organization_path():
    """common_organization_path renders ``organizations/<organization>``."""
    organization = "oyster"
    # The helper must produce the canonical resource-path form.
    actual = UrlMapsClient.common_organization_path(organization)
    assert actual == f"organizations/{organization}"
def test_transport_close():
    """Exiting the client context manager closes the underlying transport.

    For each supported transport name, the client is used as a context
    manager; the transport's session-like attribute must have ``close()``
    invoked exactly once, and only after the ``with`` block exits.
    """
    # Maps transport name -> attribute on the transport whose close() we watch.
    # NOTE: this local dict was previously named ``transports``, shadowing the
    # imported ``transports`` module inside this function; renamed for clarity.
    close_attr_by_transport = {
        "rest": "_session",
    }

    for transport_name, close_attr in close_attr_by_transport.items():
        client = UrlMapsClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport_name,
        )
        with mock.patch.object(type(getattr(client.transport, close_attr)), "close") as close:
            with client:
                # close() must not fire while the client is still in use.
                close.assert_not_called()
            close.assert_called_once()
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_vpn_gateways.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_vpn_gateways.py new file mode 100644 index 000000000..62fe6b679 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_vpn_gateways.py @@ -0,0 +1,1806 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
def client_cert_source_callback():
    """Dummy mTLS client-certificate callback returning a (cert, key) byte pair."""
    cert_bytes = b"cert bytes"
    key_bytes = b"key bytes"
    return cert_bytes, key_bytes
def modify_default_endpoint(client):
    """Return the client's default endpoint, substituting a sentinel for localhost.

    If the default endpoint targets localhost, "foo.googleapis.com" is returned
    instead so endpoint-selection tests can produce a distinct mtls endpoint.
    """
    endpoint = client.DEFAULT_ENDPOINT
    if "localhost" in endpoint:
        return "foo.googleapis.com"
    return endpoint
def test_vpn_gateways_client_get_transport_class():
    """get_transport_class resolves both the default and the named transport."""
    # With no label, the result must be one of the supported transport classes.
    default_transport = VpnGatewaysClient.get_transport_class()
    assert default_transport in [transports.VpnGatewaysRestTransport]

    # Asking for "rest" explicitly yields the REST transport class.
    rest_transport = VpnGatewaysClient.get_transport_class("rest")
    assert rest_transport == transports.VpnGatewaysRestTransport
+ with mock.patch.object(VpnGatewaysClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(VpnGatewaysClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (VpnGatewaysClient, transports.VpnGatewaysRestTransport, "rest", "true"), + (VpnGatewaysClient, transports.VpnGatewaysRestTransport, "rest", "false"), +]) +@mock.patch.object(VpnGatewaysClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VpnGatewaysClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_vpn_gateways_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VpnGatewaysClient, transports.VpnGatewaysRestTransport, "rest"), +]) +def test_vpn_gateways_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VpnGatewaysClient, transports.VpnGatewaysRestTransport, "rest"), +]) +def test_vpn_gateways_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListVpnGatewaysRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VpnGatewayAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnGatewayAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListVpnGatewaysRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
def test_aggregated_list_rest_flattened(transport: str = 'rest'):
    """Flattened arguments are transcoded onto the aggregated-list URL."""
    client = VpnGatewaysClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    with mock.patch.object(Session, 'request') as req:
        # Fake a successful (empty) aggregated-list response.
        canned = Response()
        canned.status_code = 200
        canned._content = compute.VpnGatewayAggregatedList.to_json(
            compute.VpnGatewayAggregatedList()
        ).encode('UTF-8')
        req.return_value = canned

        # Truthy flattened values, overridden by a sample request that
        # satisfies the http rule for this method.
        call_kwargs = dict(
            project='project_value',
        )
        call_kwargs.update({"project": "sample1"})
        client.aggregated_list(**call_kwargs)

        # Exactly one HTTP call, aimed at the aggregated vpnGateways endpoint.
        assert len(req.mock_calls) == 1
        _, args, _ = req.mock_calls[0]
        assert path_template.validate(
            "https://%s/compute/v1/projects/{project}/aggregated/vpnGateways" % client.transport._host,
            args[1],
        )
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListVpnGatewaysRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.VpnGatewayAggregatedList( + items={ + 'a':compute.VpnGatewaysScopedList(), + 'b':compute.VpnGatewaysScopedList(), + 'c':compute.VpnGatewaysScopedList(), + }, + next_page_token='abc', + ), + compute.VpnGatewayAggregatedList( + items={}, + next_page_token='def', + ), + compute.VpnGatewayAggregatedList( + items={ + 'g':compute.VpnGatewaysScopedList(), + }, + next_page_token='ghi', + ), + compute.VpnGatewayAggregatedList( + items={ + 'h':compute.VpnGatewaysScopedList(), + 'i':compute.VpnGatewaysScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.VpnGatewayAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.VpnGatewaysScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, 
compute.VpnGatewaysScopedList) + + assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.VpnGatewaysScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + vpn_gateway='vpn_gateway_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteVpnGatewayRequest(), + project='project_value', + region='region_value', + vpn_gateway='vpn_gateway_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.VpnGateway( + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + label_fingerprint='label_fingerprint_value', + name='name_value', + network='network_value', + region='region_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnGateway.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.VpnGateway) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.label_fingerprint == 'label_fingerprint_value' + assert response.name == 'name_value' + assert response.network == 'network_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VpnGateway() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnGateway.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + vpn_gateway='vpn_gateway_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get( + compute.GetVpnGatewayRequest(), + project='project_value', + region='region_value', + vpn_gateway='vpn_gateway_value', + ) + + +def test_get_status_rest(transport: str = 'rest', request_type=compute.GetStatusVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.VpnGatewaysGetStatusResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnGatewaysGetStatusResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_status(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.VpnGatewaysGetStatusResponse) + + +def test_get_status_rest_bad_request(transport: str = 'rest', request_type=compute.GetStatusVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_status(request) + + +def test_get_status_rest_from_dict(): + test_get_status_rest(request_type=dict) + + +def test_get_status_rest_flattened(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VpnGatewaysGetStatusResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnGatewaysGetStatusResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "vpn_gateway": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + vpn_gateway='vpn_gateway_value', + ) + mock_args.update(sample_request) + client.get_status(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnGateways/{vpn_gateway}/getStatus" % client.transport._host, args[1]) + + +def test_get_status_rest_flattened_error(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get_status( + compute.GetStatusVpnGatewayRequest(), + project='project_value', + region='region_value', + vpn_gateway='vpn_gateway_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["vpn_gateway_resource"] = compute.VpnGateway(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["vpn_gateway_resource"] = compute.VpnGateway(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + vpn_gateway_resource=compute.VpnGateway(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnGateways" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertVpnGatewayRequest(), + project='project_value', + region='region_value', + vpn_gateway_resource=compute.VpnGateway(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListVpnGatewaysRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.VpnGatewayList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnGatewayList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListVpnGatewaysRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VpnGatewayList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnGatewayList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnGateways" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListVpnGatewaysRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.VpnGatewayList( + items=[ + compute.VpnGateway(), + compute.VpnGateway(), + compute.VpnGateway(), + ], + next_page_token='abc', + ), + compute.VpnGatewayList( + items=[], + next_page_token='def', + ), + compute.VpnGatewayList( + items=[ + compute.VpnGateway(), + ], + next_page_token='ghi', + ), + compute.VpnGatewayList( + items=[ + compute.VpnGateway(), + compute.VpnGateway(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.VpnGatewayList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.VpnGateway) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_set_labels_rest(transport: str = 'rest', request_type=compute.SetLabelsVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_labels_request_resource"] = compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_labels(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_set_labels_rest_bad_request(transport: str = 'rest', request_type=compute.SetLabelsVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["region_set_labels_request_resource"] = compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.set_labels(request) + + +def test_set_labels_rest_from_dict(): + test_set_labels_rest(request_type=dict) + + +def test_set_labels_rest_flattened(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + region_set_labels_request_resource=compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + mock_args.update(sample_request) + client.set_labels(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnGateways/{resource}/setLabels" % client.transport._host, args[1]) + + +def test_set_labels_rest_flattened_error(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_labels( + compute.SetLabelsVpnGatewayRequest(), + project='project_value', + region='region_value', + resource='resource_value', + region_set_labels_request_resource=compute.RegionSetLabelsRequest(label_fingerprint='label_fingerprint_value'), + ) + + +def test_test_iam_permissions_rest(transport: str = 'rest', request_type=compute.TestIamPermissionsVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.TestPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.TestPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=compute.TestIamPermissionsVpnGatewayRequest): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "resource": "sample3"} + request_init["test_permissions_request_resource"] = compute.TestPermissionsRequest(permissions=['permissions_value']) + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_from_dict(): + test_test_iam_permissions_rest(request_type=dict) + + +def test_test_iam_permissions_rest_flattened(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.TestPermissionsResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.TestPermissionsResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "resource": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + mock_args.update(sample_request) + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnGateways/{resource}/testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + compute.TestIamPermissionsVpnGatewayRequest(), + project='project_value', + region='region_value', + resource='resource_value', + test_permissions_request_resource=compute.TestPermissionsRequest(permissions=['permissions_value']), + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.VpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.VpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VpnGatewaysClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.VpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VpnGatewaysClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.VpnGatewaysRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = VpnGatewaysClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.VpnGatewaysRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_vpn_gateways_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.VpnGatewaysTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_vpn_gateways_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.vpn_gateways.transports.VpnGatewaysTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.VpnGatewaysTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'aggregated_list', + 'delete', + 'get', + 'get_status', + 'insert', + 'list', + 'set_labels', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_vpn_gateways_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.vpn_gateways.transports.VpnGatewaysTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VpnGatewaysTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_vpn_gateways_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.vpn_gateways.transports.VpnGatewaysTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VpnGatewaysTransport() + adc.assert_called_once() + + +def test_vpn_gateways_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VpnGatewaysClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_vpn_gateways_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.VpnGatewaysRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_vpn_gateways_host_no_port(): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_vpn_gateways_host_with_port(): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = 
"billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = VpnGatewaysClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = VpnGatewaysClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = VpnGatewaysClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = VpnGatewaysClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = VpnGatewaysClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = VpnGatewaysClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = VpnGatewaysClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = VpnGatewaysClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = VpnGatewaysClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = VpnGatewaysClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = VpnGatewaysClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VpnGatewaysClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = VpnGatewaysClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = VpnGatewaysClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = VpnGatewaysClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.VpnGatewaysTransport, '_prep_wrapped_messages') as prep: + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.VpnGatewaysTransport, '_prep_wrapped_messages') as prep: + transport_class = VpnGatewaysClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = VpnGatewaysClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_vpn_tunnels.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_vpn_tunnels.py new file mode 100644 index 000000000..ece022a39 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_vpn_tunnels.py @@ -0,0 +1,1454 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.vpn_tunnels import VpnTunnelsClient +from google.cloud.compute_v1.services.vpn_tunnels import pagers +from google.cloud.compute_v1.services.vpn_tunnels import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert VpnTunnelsClient._get_default_mtls_endpoint(None) is None + assert VpnTunnelsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert VpnTunnelsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert VpnTunnelsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert VpnTunnelsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert VpnTunnelsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + VpnTunnelsClient, +]) +def test_vpn_tunnels_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.VpnTunnelsRestTransport, "rest"), +]) +def test_vpn_tunnels_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + 
use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + VpnTunnelsClient, +]) +def test_vpn_tunnels_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_vpn_tunnels_client_get_transport_class(): + transport = VpnTunnelsClient.get_transport_class() + available_transports = [ + transports.VpnTunnelsRestTransport, + ] + assert transport in available_transports + + transport = VpnTunnelsClient.get_transport_class("rest") + assert transport == transports.VpnTunnelsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VpnTunnelsClient, transports.VpnTunnelsRestTransport, "rest"), +]) +@mock.patch.object(VpnTunnelsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VpnTunnelsClient)) +def test_vpn_tunnels_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(VpnTunnelsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(VpnTunnelsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (VpnTunnelsClient, transports.VpnTunnelsRestTransport, "rest", "true"), + (VpnTunnelsClient, transports.VpnTunnelsRestTransport, "rest", "false"), +]) +@mock.patch.object(VpnTunnelsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(VpnTunnelsClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_vpn_tunnels_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VpnTunnelsClient, transports.VpnTunnelsRestTransport, "rest"), +]) +def test_vpn_tunnels_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (VpnTunnelsClient, transports.VpnTunnelsRestTransport, "rest"), +]) +def test_vpn_tunnels_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_aggregated_list_rest(transport: str = 'rest', request_type=compute.AggregatedListVpnTunnelsRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VpnTunnelAggregatedList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + unreachables=['unreachables_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnTunnelAggregatedList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.aggregated_list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.AggregatedListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + assert response.unreachables == ['unreachables_value'] + + +def test_aggregated_list_rest_bad_request(transport: str = 'rest', request_type=compute.AggregatedListVpnTunnelsRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.aggregated_list(request) + + +def test_aggregated_list_rest_from_dict(): + test_aggregated_list_rest(request_type=dict) + + +def test_aggregated_list_rest_flattened(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.VpnTunnelAggregatedList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnTunnelAggregatedList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.aggregated_list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/aggregated/vpnTunnels" % client.transport._host, args[1]) + + +def test_aggregated_list_rest_flattened_error(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.aggregated_list( + compute.AggregatedListVpnTunnelsRequest(), + project='project_value', + ) + + +def test_aggregated_list_rest_pager(): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.VpnTunnelAggregatedList( + items={ + 'a':compute.VpnTunnelsScopedList(), + 'b':compute.VpnTunnelsScopedList(), + 'c':compute.VpnTunnelsScopedList(), + }, + next_page_token='abc', + ), + compute.VpnTunnelAggregatedList( + items={}, + next_page_token='def', + ), + compute.VpnTunnelAggregatedList( + items={ + 'g':compute.VpnTunnelsScopedList(), + }, + next_page_token='ghi', + ), + compute.VpnTunnelAggregatedList( + items={ + 'h':compute.VpnTunnelsScopedList(), + 'i':compute.VpnTunnelsScopedList(), + }, + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.VpnTunnelAggregatedList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.aggregated_list(request=sample_request) + + assert isinstance(pager.get('a'), compute.VpnTunnelsScopedList) + assert pager.get('h') is None + + results = list(pager) + assert len(results) == 6 + assert all( + isinstance(i, tuple) + for i in results) + for result in results: + assert isinstance(result, tuple) + assert tuple(type(t) for t in result) == (str, compute.VpnTunnelsScopedList) + + 
assert pager.get('a') is None + assert isinstance(pager.get('h'), compute.VpnTunnelsScopedList) + + pages = list(client.aggregated_list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteVpnTunnelRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_tunnel": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteVpnTunnelRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_tunnel": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "vpn_tunnel": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + vpn_tunnel='vpn_tunnel_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnTunnels/{vpn_tunnel}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteVpnTunnelRequest(), + project='project_value', + region='region_value', + vpn_tunnel='vpn_tunnel_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetVpnTunnelRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_tunnel": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VpnTunnel( + creation_timestamp='creation_timestamp_value', + description='description_value', + detailed_status='detailed_status_value', + id=205, + ike_version=1182, + kind='kind_value', + local_traffic_selector=['local_traffic_selector_value'], + name='name_value', + peer_external_gateway='peer_external_gateway_value', + peer_external_gateway_interface=3279, + peer_gcp_gateway='peer_gcp_gateway_value', + peer_ip='peer_ip_value', + region='region_value', + remote_traffic_selector=['remote_traffic_selector_value'], + router='router_value', + self_link='self_link_value', + shared_secret='shared_secret_value', + shared_secret_hash='shared_secret_hash_value', + status='status_value', + target_vpn_gateway='target_vpn_gateway_value', + vpn_gateway='vpn_gateway_value', + vpn_gateway_interface=2229, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnTunnel.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.VpnTunnel) + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.detailed_status == 'detailed_status_value' + assert response.id == 205 + assert response.ike_version == 1182 + assert response.kind == 'kind_value' + assert response.local_traffic_selector == ['local_traffic_selector_value'] + assert response.name == 'name_value' + assert response.peer_external_gateway == 'peer_external_gateway_value' + assert response.peer_external_gateway_interface == 3279 + assert response.peer_gcp_gateway == 'peer_gcp_gateway_value' + assert response.peer_ip == 'peer_ip_value' + assert response.region == 'region_value' + assert response.remote_traffic_selector == ['remote_traffic_selector_value'] + assert response.router == 'router_value' + assert response.self_link == 'self_link_value' + assert response.shared_secret == 'shared_secret_value' + assert response.shared_secret_hash == 'shared_secret_hash_value' + assert response.status == 'status_value' + assert response.target_vpn_gateway == 'target_vpn_gateway_value' + assert response.vpn_gateway == 'vpn_gateway_value' + assert response.vpn_gateway_interface == 2229 + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetVpnTunnelRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2", "vpn_tunnel": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.VpnTunnel() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnTunnel.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2", "vpn_tunnel": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + vpn_tunnel='vpn_tunnel_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnTunnels/{vpn_tunnel}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetVpnTunnelRequest(), + project='project_value', + region='region_value', + vpn_tunnel='vpn_tunnel_value', + ) + + +def test_insert_rest(transport: str = 'rest', request_type=compute.InsertVpnTunnelRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["vpn_tunnel_resource"] = compute.VpnTunnel(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.insert(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_insert_rest_bad_request(transport: str = 'rest', request_type=compute.InsertVpnTunnelRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request_init["vpn_tunnel_resource"] = compute.VpnTunnel(creation_timestamp='creation_timestamp_value') + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.insert(request) + + +def test_insert_rest_from_dict(): + test_insert_rest(request_type=dict) + + +def test_insert_rest_flattened(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + vpn_tunnel_resource=compute.VpnTunnel(creation_timestamp='creation_timestamp_value'), + ) + mock_args.update(sample_request) + client.insert(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnTunnels" % client.transport._host, args[1]) + + +def test_insert_rest_flattened_error(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.insert( + compute.InsertVpnTunnelRequest(), + project='project_value', + region='region_value', + vpn_tunnel_resource=compute.VpnTunnel(creation_timestamp='creation_timestamp_value'), + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListVpnTunnelsRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.VpnTunnelList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnTunnelList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListVpnTunnelsRequest): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "region": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.VpnTunnelList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.VpnTunnelList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "region": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + region='region_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/regions/{region}/vpnTunnels" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list( + compute.ListVpnTunnelsRequest(), + project='project_value', + region='region_value', + ) + + +def test_list_rest_pager(): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.VpnTunnelList( + items=[ + compute.VpnTunnel(), + compute.VpnTunnel(), + compute.VpnTunnel(), + ], + next_page_token='abc', + ), + compute.VpnTunnelList( + items=[], + next_page_token='def', + ), + compute.VpnTunnelList( + items=[ + compute.VpnTunnel(), + ], + next_page_token='ghi', + ), + compute.VpnTunnelList( + items=[ + compute.VpnTunnel(), + compute.VpnTunnel(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.VpnTunnelList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "region": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.VpnTunnel) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.VpnTunnelsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. 
+ transport = transports.VpnTunnelsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VpnTunnelsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.VpnTunnelsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = VpnTunnelsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.VpnTunnelsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = VpnTunnelsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.VpnTunnelsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_vpn_tunnels_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.VpnTunnelsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_vpn_tunnels_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.vpn_tunnels.transports.VpnTunnelsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.VpnTunnelsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + 'aggregated_list', + 'delete', + 'get', + 'insert', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_vpn_tunnels_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.vpn_tunnels.transports.VpnTunnelsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VpnTunnelsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_vpn_tunnels_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.vpn_tunnels.transports.VpnTunnelsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.VpnTunnelsTransport() + adc.assert_called_once() + + +def test_vpn_tunnels_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + VpnTunnelsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_vpn_tunnels_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.VpnTunnelsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_vpn_tunnels_host_no_port(): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_vpn_tunnels_host_with_port(): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = VpnTunnelsClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = VpnTunnelsClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VpnTunnelsClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = VpnTunnelsClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = VpnTunnelsClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = VpnTunnelsClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = VpnTunnelsClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = VpnTunnelsClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = VpnTunnelsClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = VpnTunnelsClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = VpnTunnelsClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = VpnTunnelsClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = VpnTunnelsClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = VpnTunnelsClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = VpnTunnelsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.VpnTunnelsTransport, '_prep_wrapped_messages') as prep: + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.VpnTunnelsTransport, '_prep_wrapped_messages') as prep: + transport_class = VpnTunnelsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = VpnTunnelsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_zone_operations.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_zone_operations.py new file mode 100644 index 000000000..4f7d139d9 --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_zone_operations.py @@ -0,0 +1,1223 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.zone_operations import ZoneOperationsClient +from google.cloud.compute_v1.services.zone_operations import pagers +from google.cloud.compute_v1.services.zone_operations import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ZoneOperationsClient._get_default_mtls_endpoint(None) is None + assert ZoneOperationsClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ZoneOperationsClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ZoneOperationsClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ZoneOperationsClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ZoneOperationsClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ZoneOperationsClient, +]) +def test_zone_operations_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ZoneOperationsRestTransport, "rest"), +]) +def test_zone_operations_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, 
always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ZoneOperationsClient, +]) +def test_zone_operations_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_zone_operations_client_get_transport_class(): + transport = ZoneOperationsClient.get_transport_class() + available_transports = [ + transports.ZoneOperationsRestTransport, + ] + assert transport in available_transports + + transport = ZoneOperationsClient.get_transport_class("rest") + assert transport == transports.ZoneOperationsRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"), +]) +@mock.patch.object(ZoneOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ZoneOperationsClient)) +def test_zone_operations_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. 
+ with mock.patch.object(ZoneOperationsClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(ZoneOperationsClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest", "true"), + (ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest", "false"), +]) +@mock.patch.object(ZoneOperationsClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ZoneOperationsClient)) +@mock.patch.dict(os.environ, 
{"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_zone_operations_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"), +]) +def test_zone_operations_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ZoneOperationsClient, transports.ZoneOperationsRestTransport, "rest"), +]) +def test_zone_operations_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_delete_rest(transport: str = 'rest', request_type=compute.DeleteZoneOperationRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DeleteZoneOperationResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteZoneOperationResponse.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.DeleteZoneOperationResponse) + + +def test_delete_rest_bad_request(transport: str = 'rest', request_type=compute.DeleteZoneOperationRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete(request) + + +def test_delete_rest_from_dict(): + test_delete_rest(request_type=dict) + + +def test_delete_rest_flattened(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.DeleteZoneOperationResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.DeleteZoneOperationResponse.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.delete(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/operations/{operation}" % client.transport._host, args[1]) + + +def test_delete_rest_flattened_error(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete( + compute.DeleteZoneOperationRequest(), + project='project_value', + zone='zone_value', + operation='operation_value', + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetZoneOperationRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetZoneOperationRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/operations/{operation}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetZoneOperationRequest(), + project='project_value', + zone='zone_value', + operation='operation_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListZoneOperationsRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.OperationList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListZoneOperationsRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.OperationList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.OperationList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/operations" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListZoneOperationsRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest_pager(): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + compute.Operation(), + ], + next_page_token='abc', + ), + compute.OperationList( + items=[], + next_page_token='def', + ), + compute.OperationList( + items=[ + compute.Operation(), + ], + next_page_token='ghi', + ), + compute.OperationList( + items=[ + compute.Operation(), + compute.Operation(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.OperationList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1", "zone": "sample2"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Operation) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_wait_rest(transport: str = 'rest', request_type=compute.WaitZoneOperationRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + 
transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation( + client_operation_id='client_operation_id_value', + creation_timestamp='creation_timestamp_value', + description='description_value', + end_time='end_time_value', + http_error_message='http_error_message_value', + http_error_status_code=2374, + id=205, + insert_time='insert_time_value', + kind='kind_value', + name='name_value', + operation_group_id='operation_group_id_value', + operation_type='operation_type_value', + progress=885, + region='region_value', + self_link='self_link_value', + start_time='start_time_value', + status=compute.Operation.Status.DONE, + status_message='status_message_value', + target_id=947, + target_link='target_link_value', + user='user_value', + zone='zone_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.wait(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, compute.Operation) + assert response.client_operation_id == 'client_operation_id_value' + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.end_time == 'end_time_value' + assert response.http_error_message == 'http_error_message_value' + assert response.http_error_status_code == 2374 + assert response.id == 205 + assert response.insert_time == 'insert_time_value' + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.operation_group_id == 'operation_group_id_value' + assert response.operation_type == 'operation_type_value' + assert response.progress == 885 + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.start_time == 'start_time_value' + assert response.status == compute.Operation.Status.DONE + assert response.status_message == 'status_message_value' + assert response.target_id == 947 + assert response.target_link == 'target_link_value' + assert response.user == 'user_value' + assert response.zone == 'zone_value' + + +def test_wait_rest_bad_request(transport: str = 'rest', request_type=compute.WaitZoneOperationRequest): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.wait(request) + + +def test_wait_rest_from_dict(): + test_wait_rest(request_type=dict) + + +def test_wait_rest_flattened(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Operation() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Operation.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2", "operation": "sample3"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + operation='operation_value', + ) + mock_args.update(sample_request) + client.wait(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}/operations/{operation}/wait" % client.transport._host, args[1]) + + +def test_wait_rest_flattened_error(transport: str = 'rest'): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.wait( + compute.WaitZoneOperationRequest(), + project='project_value', + zone='zone_value', + operation='operation_value', + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.ZoneOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ZoneOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ZoneOperationsClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ZoneOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ZoneOperationsClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ZoneOperationsRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ZoneOperationsClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ZoneOperationsRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. 
+ with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_zone_operations_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ZoneOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_zone_operations_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ZoneOperationsTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'delete', + 'get', + 'list', + 'wait', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_zone_operations_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ZoneOperationsTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + 
quota_project_id="octopus", + ) + + +def test_zone_operations_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.zone_operations.transports.ZoneOperationsTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ZoneOperationsTransport() + adc.assert_called_once() + + +def test_zone_operations_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ZoneOperationsClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_zone_operations_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ZoneOperationsRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_zone_operations_host_no_port(): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_zone_operations_host_with_port(): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) 
+    assert client.transport._host == 'compute.googleapis.com:8000'
+
+
+def test_common_billing_account_path():
+    account = "squid"
+    want = f"billingAccounts/{account}"
+    got = ZoneOperationsClient.common_billing_account_path(account)
+    assert want == got
+
+
+def test_parse_common_billing_account_path():
+    want = {
+        "billing_account": "clam",
+    }
+    built = ZoneOperationsClient.common_billing_account_path(**want)
+
+    # Round-trip: parsing the built path must recover the original fields.
+    got = ZoneOperationsClient.parse_common_billing_account_path(built)
+    assert want == got
+
+def test_common_folder_path():
+    folder = "whelk"
+    want = f"folders/{folder}"
+    got = ZoneOperationsClient.common_folder_path(folder)
+    assert want == got
+
+
+def test_parse_common_folder_path():
+    want = {
+        "folder": "octopus",
+    }
+    built = ZoneOperationsClient.common_folder_path(**want)
+
+    # Round-trip: parsing the built path must recover the original fields.
+    got = ZoneOperationsClient.parse_common_folder_path(built)
+    assert want == got
+
+def test_common_organization_path():
+    organization = "oyster"
+    want = f"organizations/{organization}"
+    got = ZoneOperationsClient.common_organization_path(organization)
+    assert want == got
+
+
+def test_parse_common_organization_path():
+    want = {
+        "organization": "nudibranch",
+    }
+    built = ZoneOperationsClient.common_organization_path(**want)
+
+    # Round-trip: parsing the built path must recover the original fields.
+    got = ZoneOperationsClient.parse_common_organization_path(built)
+    assert want == got
+
+def test_common_project_path():
+    project = "cuttlefish"
+    want = f"projects/{project}"
+    got = ZoneOperationsClient.common_project_path(project)
+    assert want == got
+
+
+def test_parse_common_project_path():
+    want = {
+        "project": "mussel",
+    }
+    built = ZoneOperationsClient.common_project_path(**want)
+
+    # Round-trip: parsing the built path must recover the original fields.
+    got = ZoneOperationsClient.parse_common_project_path(built)
+    assert want == got
+
+def test_common_location_path():
+    project = "winkle"
+    location = "nautilus"
+    want = f"projects/{project}/locations/{location}"
+    got = ZoneOperationsClient.common_location_path(project, location)
+    assert want == got
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "scallop",
+        "location": "abalone",
+    }
+    path = ZoneOperationsClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = ZoneOperationsClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ZoneOperationsTransport, '_prep_wrapped_messages') as prep: + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ZoneOperationsTransport, '_prep_wrapped_messages') as prep: + transport_class = ZoneOperationsClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ZoneOperationsClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() diff --git a/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_zones.py b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_zones.py new file mode 100644 index 000000000..bb64779ba --- /dev/null +++ b/owl-bot-staging/v1/tests/unit/gapic/compute_v1/test_zones.py @@ -0,0 +1,937 @@ +# -*- coding: utf-8 -*- +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule + +from requests import Response +from requests import Request +from requests.sessions import Session + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.compute_v1.services.zones import ZonesClient +from google.cloud.compute_v1.services.zones import pagers +from google.cloud.compute_v1.services.zones import transports +from google.cloud.compute_v1.types import compute +from google.oauth2 import service_account +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert ZonesClient._get_default_mtls_endpoint(None) is None + assert ZonesClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert ZonesClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert ZonesClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert ZonesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert ZonesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class", [ + ZonesClient, +]) +def test_zones_client_from_service_account_info(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.ZonesRestTransport, "rest"), +]) +def test_zones_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class", [ + ZonesClient, +]) +def test_zones_client_from_service_account_file(client_class): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json") + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_zones_client_get_transport_class(): + transport = ZonesClient.get_transport_class() + available_transports = [ + transports.ZonesRestTransport, + ] + assert transport in available_transports + + transport = ZonesClient.get_transport_class("rest") + assert transport == transports.ZonesRestTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ZonesClient, transports.ZonesRestTransport, "rest"), +]) +@mock.patch.object(ZonesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ZonesClient)) +def test_zones_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(ZonesClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. 
+ with mock.patch.object(ZonesClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class() + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class() + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (ZonesClient, transports.ZonesRestTransport, "rest", "true"), + (ZonesClient, transports.ZonesRestTransport, "rest", "false"), +]) +@mock.patch.object(ZonesClient, "DEFAULT_ENDPOINT", modify_default_endpoint(ZonesClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def 
test_zones_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ZonesClient, transports.ZonesRestTransport, "rest"), +]) +def test_zones_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (ZonesClient, transports.ZonesRestTransport, "rest"), +]) +def test_zones_client_client_options_credentials_file(client_class, transport_class, transport_name): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + ) + + +def test_get_rest(transport: str = 'rest', request_type=compute.GetZoneRequest): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = compute.Zone( + available_cpu_platforms=['available_cpu_platforms_value'], + creation_timestamp='creation_timestamp_value', + description='description_value', + id=205, + kind='kind_value', + name='name_value', + region='region_value', + self_link='self_link_value', + status='status_value', + supports_pzs=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Zone.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, compute.Zone) + assert response.available_cpu_platforms == ['available_cpu_platforms_value'] + assert response.creation_timestamp == 'creation_timestamp_value' + assert response.description == 'description_value' + assert response.id == 205 + assert response.kind == 'kind_value' + assert response.name == 'name_value' + assert response.region == 'region_value' + assert response.self_link == 'self_link_value' + assert response.status == 'status_value' + assert response.supports_pzs is True + + +def test_get_rest_bad_request(transport: str = 'rest', request_type=compute.GetZoneRequest): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1", "zone": "sample2"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get(request) + + +def test_get_rest_from_dict(): + test_get_rest(request_type=dict) + + +def test_get_rest_flattened(transport: str = 'rest'): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.Zone() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.Zone.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1", "zone": "sample2"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + zone='zone_value', + ) + mock_args.update(sample_request) + client.get(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones/{zone}" % client.transport._host, args[1]) + + +def test_get_rest_flattened_error(transport: str = 'rest'): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.get( + compute.GetZoneRequest(), + project='project_value', + zone='zone_value', + ) + + +def test_list_rest(transport: str = 'rest', request_type=compute.ListZonesRequest): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ZoneList( + id='id_value', + kind='kind_value', + next_page_token='next_page_token_value', + self_link='self_link_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ZoneList.to_json(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListPager) + assert response.id == 'id_value' + assert response.kind == 'kind_value' + assert response.next_page_token == 'next_page_token_value' + assert response.self_link == 'self_link_value' + + +def test_list_rest_bad_request(transport: str = 'rest', request_type=compute.ListZonesRequest): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"project": "sample1"} + request = request_type(request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list(request) + + +def test_list_rest_from_dict(): + test_list_rest(request_type=dict) + + +def test_list_rest_flattened(transport: str = 'rest'): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # Designate an appropriate value for the returned response. + return_value = compute.ZoneList() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = compute.ZoneList.to_json(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + # get arguments that satisfy an http rule for this method + sample_request = {"project": "sample1"} + + # get truthy value for each flattened field + mock_args = dict( + project='project_value', + ) + mock_args.update(sample_request) + client.list(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("https://%s/compute/v1/projects/{project}/zones" % client.transport._host, args[1]) + + +def test_list_rest_flattened_error(transport: str = 'rest'): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.list( + compute.ListZonesRequest(), + project='project_value', + ) + + +def test_list_rest_pager(): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. + #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + compute.ZoneList( + items=[ + compute.Zone(), + compute.Zone(), + compute.Zone(), + ], + next_page_token='abc', + ), + compute.ZoneList( + items=[], + next_page_token='def', + ), + compute.ZoneList( + items=[ + compute.Zone(), + ], + next_page_token='ghi', + ), + compute.ZoneList( + items=[ + compute.Zone(), + compute.Zone(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(compute.ZoneList.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {"project": "sample1"} + + pager = client.list(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, compute.Zone) + for i in results) + + pages = list(client.list(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. 
+ transport = transports.ZonesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.ZonesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ZonesClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.ZonesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = ZonesClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.ZonesRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = ZonesClient(transport=transport) + assert client.transport is transport + + +@pytest.mark.parametrize("transport_class", [ + transports.ZonesRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +def test_zones_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.ZonesTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_zones_base_transport(): + # Instantiate the base transport. 
+ with mock.patch('google.cloud.compute_v1.services.zones.transports.ZonesTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.ZonesTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. + methods = ( + 'get', + 'list', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + +def test_zones_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.compute_v1.services.zones.transports.ZonesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ZonesTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id="octopus", + ) + + +def test_zones_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.compute_v1.services.zones.transports.ZonesTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.ZonesTransport() + adc.assert_called_once() + + +def test_zones_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + ZonesClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/compute.readonly', + 'https://www.googleapis.com/auth/compute', + 'https://www.googleapis.com/auth/cloud-platform', +), + quota_project_id=None, + ) + + +def test_zones_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.ZonesRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_zones_host_no_port(): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com'), + ) + assert client.transport._host == 'compute.googleapis.com:443' + + +def test_zones_host_with_port(): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='compute.googleapis.com:8000'), + ) + assert client.transport._host == 'compute.googleapis.com:8000' + + +def test_common_billing_account_path(): + billing_account = "squid" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = ZonesClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "clam", + } + path = ZonesClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ZonesClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "whelk" + expected = "folders/{folder}".format(folder=folder, ) + actual = ZonesClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "octopus", + } + path = ZonesClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = ZonesClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "oyster" + expected = "organizations/{organization}".format(organization=organization, ) + actual = ZonesClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "nudibranch", + } + path = ZonesClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = ZonesClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "cuttlefish" + expected = "projects/{project}".format(project=project, ) + actual = ZonesClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "mussel", + } + path = ZonesClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = ZonesClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "winkle" + location = "nautilus" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = ZonesClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "scallop", + "location": "abalone", + } + path = ZonesClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = ZonesClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_withDEFAULT_CLIENT_INFO(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.ZonesTransport, '_prep_wrapped_messages') as prep: + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.ZonesTransport, '_prep_wrapped_messages') as prep: + transport_class = ZonesClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +def test_transport_close(): + transports = { + "rest": "_session", + } + + for transport, close_name in transports.items(): + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + ] + for transport in transports: + client = ZonesClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called()